/* [PATCH] b44: add wol
 * [safe/jmp/linux-2.6] drivers/net/b44.c
 */
1 /* b44.c: Broadcom 4400 device driver.
2  *
3  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4  * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5  * Copyright (C) 2006 Broadcom Corporation.
6  *
7  * Distribute under GPL.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
23
24 #include <asm/uaccess.h>
25 #include <asm/io.h>
26 #include <asm/irq.h>
27
28 #include "b44.h"
29
#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.00"
#define DRV_MODULE_RELDATE      "Apr 7, 2006"

/* Default netif_msg bitmap, used when the b44_debug module parameter
 * is left at its default of -1. */
#define B44_DEF_MSG_ENABLE        \
	(NETIF_MSG_DRV          | \
	 NETIF_MSG_PROBE        | \
	 NETIF_MSG_LINK         | \
	 NETIF_MSG_TIMER        | \
	 NETIF_MSG_IFDOWN       | \
	 NETIF_MSG_IFUP         | \
	 NETIF_MSG_RX_ERR       | \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

/* RX/TX descriptor ring geometry.  The ring sizes must remain powers
 * of two: NEXT_TX() and the RX index arithmetic mask with (size - 1). */
#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)
/* The chip can only DMA to/from the low 1GB of the address space
 * (see the work-around in b44_alloc_rx_skb()). */
#define B44_DMA_MASK 0x3fffffff

/* Free TX descriptor count, accounting for producer/consumer wrap. */
#define TX_RING_GAP(BP) \
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
	(((BP)->tx_cons <= (BP)->tx_prod) ?                             \
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

/* RX buffers leave room for the chip-written rx_header (bp->rx_offset)
 * plus alignment slack; see the comment above b44_alloc_rx_skb(). */
#define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ           (B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)
77
/* Version banner printed at probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

/* PCI IDs of the supported BCM4401 variants. */
static struct pci_device_id b44_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

/* Forward declarations for the chip reset path, used from several places. */
static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static void b44_init_hw(struct b44 *);

/* Descriptor sync parameters used by the b44_sync_dma_desc_* helpers
 * (presumably set up during module init — not visible in this chunk). */
static int dma_desc_align_mask;
static int dma_desc_sync_size;

/* Stat names for ethtool, stringified from B44_STAT_REG_DECLARE so they
 * presumably stay in step with the hw_stats layout — see b44.h. */
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};
114
115 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
116                                                 dma_addr_t dma_base,
117                                                 unsigned long offset,
118                                                 enum dma_data_direction dir)
119 {
120         dma_sync_single_range_for_device(&pdev->dev, dma_base,
121                                          offset & dma_desc_align_mask,
122                                          dma_desc_sync_size, dir);
123 }
124
125 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
126                                              dma_addr_t dma_base,
127                                              unsigned long offset,
128                                              enum dma_data_direction dir)
129 {
130         dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
131                                       offset & dma_desc_align_mask,
132                                       dma_desc_sync_size, dir);
133 }
134
/* Read a 32-bit chip register at byte offset @reg. */
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return readl(bp->regs + reg);
}
139
/* Write a 32-bit chip register at byte offset @reg.  The write is
 * posted; callers that need ordering read the register back. */
static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	writel(val, bp->regs + reg);
}
145
146 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
147                         u32 bit, unsigned long timeout, const int clear)
148 {
149         unsigned long i;
150
151         for (i = 0; i < timeout; i++) {
152                 u32 val = br32(bp, reg);
153
154                 if (clear && !(val & bit))
155                         break;
156                 if (!clear && (val & bit))
157                         break;
158                 udelay(10);
159         }
160         if (i == timeout) {
161                 printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
162                        "%lx to %s.\n",
163                        bp->dev->name,
164                        bit, reg,
165                        (clear ? "clear" : "set"));
166                 return -ENODEV;
167         }
168         return 0;
169 }
170
171 /* Sonics SiliconBackplane support routines.  ROFL, you should see all the
172  * buzz words used on this company's website :-)
173  *
174  * All of these routines must be invoked with bp->lock held and
175  * interrupts disabled.
176  */
177
178 #define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
179 #define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */
180
181 static u32 ssb_get_core_rev(struct b44 *bp)
182 {
183         return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
184 }
185
186 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
187 {
188         u32 bar_orig, pci_rev, val;
189
190         pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
191         pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
192         pci_rev = ssb_get_core_rev(bp);
193
194         val = br32(bp, B44_SBINTVEC);
195         val |= cores;
196         bw32(bp, B44_SBINTVEC, val);
197
198         val = br32(bp, SSB_PCI_TRANS_2);
199         val |= SSB_PCI_PREF | SSB_PCI_BURST;
200         bw32(bp, SSB_PCI_TRANS_2, val);
201
202         pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
203
204         return pci_rev;
205 }
206
/* Put the core into reset.  The register-write sequence and the delays
 * are hardware-mandated; do not reorder. */
static void ssb_core_disable(struct b44 *bp)
{
	/* Already in reset: nothing to do. */
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	/* Reject new backplane transactions, then wait for the reject to
	 * latch and for in-flight transactions to drain. */
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	/* Assert reset with the clock still running; the read-back
	 * flushes the posted write before the settle delay. */
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
	/* Finally stop the clock, leaving the core held in reset. */
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}
223
/* Full core reset: disable, re-clock while held in reset, clear latched
 * error state, then release reset.  Each posted write is flushed with a
 * read-back and followed by a settle delay; do not reorder. */
static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
	/* Start the clock while reset is still asserted. */
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround.  */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	/* Clear latched inband-error / timeout state. */
	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	/* De-assert reset, clock still forced. */
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	/* Normal clocking. */
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}
249
/* Core unit number; always 0 for this device.
 *
 * A never-compiled (#if 0) sketch that decoded B44_SBADMATCH0 used to
 * live here; it referenced an undeclared variable ('type') and would
 * not even have compiled, so it has been removed as dead code. */
static int ssb_core_unit(struct b44 *bp)
{
	return 0;
}
274
275 static int ssb_is_core_up(struct b44 *bp)
276 {
277         return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
278                 == SBTMSLOW_CLOCK);
279 }
280
281 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
282 {
283         u32 val;
284
285         val  = ((u32) data[2]) << 24;
286         val |= ((u32) data[3]) << 16;
287         val |= ((u32) data[4]) <<  8;
288         val |= ((u32) data[5]) <<  0;
289         bw32(bp, B44_CAM_DATA_LO, val);
290         val = (CAM_DATA_HI_VALID |
291                (((u32) data[0]) << 8) |
292                (((u32) data[1]) << 0));
293         bw32(bp, B44_CAM_DATA_HI, val);
294         bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
295                             (index << CAM_CTRL_INDEX_SHIFT)));
296         b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
297 }
298
/* Mask all interrupt sources.  Does not flush the posted write; use
 * b44_disable_ints() when the mask must reach the chip before return. */
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}
303
/* Mask all interrupts and flush the write so the chip has seen it
 * before we return. */
static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}
311
/* Unmask the interrupt sources recorded in bp->imask. */
static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}
316
317 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
318 {
319         int err;
320
321         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
322         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
323                              (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
324                              (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
325                              (reg << MDIO_DATA_RA_SHIFT) |
326                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
327         err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
328         *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
329
330         return err;
331 }
332
333 static int b44_writephy(struct b44 *bp, int reg, u32 val)
334 {
335         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337                              (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
338                              (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
339                              (reg << MDIO_DATA_RA_SHIFT) |
340                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
341                              (val & MDIO_DATA_DATA)));
342         return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
343 }
344
345 /* miilib interface */
346 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
347  * due to code existing before miilib use was added to this driver.
348  * Someone should remove this artificial driver limitation in
349  * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
350  */
351 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
352 {
353         u32 val;
354         struct b44 *bp = netdev_priv(dev);
355         int rc = b44_readphy(bp, location, &val);
356         if (rc)
357                 return 0xffffffff;
358         return val;
359 }
360
/* miilib write hook.  phy_id is ignored (b44_writephy always uses
 * bp->phy_addr); write errors are silently dropped per the mii API. */
static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	b44_writephy(netdev_priv(dev), location, val);
}
367
368 static int b44_phy_reset(struct b44 *bp)
369 {
370         u32 val;
371         int err;
372
373         err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
374         if (err)
375                 return err;
376         udelay(100);
377         err = b44_readphy(bp, MII_BMCR, &val);
378         if (!err) {
379                 if (val & BMCR_RESET) {
380                         printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
381                                bp->dev->name);
382                         err = -ENODEV;
383                 }
384         }
385
386         return 0;
387 }
388
389 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
390 {
391         u32 val;
392
393         bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
394         bp->flags |= pause_flags;
395
396         val = br32(bp, B44_RXCONFIG);
397         if (pause_flags & B44_FLAG_RX_PAUSE)
398                 val |= RXCONFIG_FLOW;
399         else
400                 val &= ~RXCONFIG_FLOW;
401         bw32(bp, B44_RXCONFIG, val);
402
403         val = br32(bp, B44_MAC_FLOW);
404         if (pause_flags & B44_FLAG_TX_PAUSE)
405                 val |= (MAC_FLOW_PAUSE_ENAB |
406                         (0xc0 & MAC_FLOW_RX_HI_WATER));
407         else
408                 val &= ~MAC_FLOW_PAUSE_ENAB;
409         bw32(bp, B44_MAC_FLOW, val);
410 }
411
412 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
413 {
414         u32 pause_enab = 0;
415
416         /* The driver supports only rx pause by default because
417            the b44 mac tx pause mechanism generates excessive
418            pause frames.
419            Use ethtool to turn on b44 tx pause if necessary.
420          */
421         if ((local & ADVERTISE_PAUSE_CAP) &&
422             (local & ADVERTISE_PAUSE_ASYM)){
423                 if ((remote & LPA_PAUSE_ASYM) &&
424                     !(remote & LPA_PAUSE_CAP))
425                         pause_enab |= B44_FLAG_RX_PAUSE;
426         }
427
428         __b44_set_flow_ctrl(bp, pause_enab);
429 }
430
431 static int b44_setup_phy(struct b44 *bp)
432 {
433         u32 val;
434         int err;
435
436         if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
437                 goto out;
438         if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
439                                 val & MII_ALEDCTRL_ALLMSK)) != 0)
440                 goto out;
441         if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
442                 goto out;
443         if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
444                                 val | MII_TLEDCTRL_ENABLE)) != 0)
445                 goto out;
446
447         if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
448                 u32 adv = ADVERTISE_CSMA;
449
450                 if (bp->flags & B44_FLAG_ADV_10HALF)
451                         adv |= ADVERTISE_10HALF;
452                 if (bp->flags & B44_FLAG_ADV_10FULL)
453                         adv |= ADVERTISE_10FULL;
454                 if (bp->flags & B44_FLAG_ADV_100HALF)
455                         adv |= ADVERTISE_100HALF;
456                 if (bp->flags & B44_FLAG_ADV_100FULL)
457                         adv |= ADVERTISE_100FULL;
458
459                 if (bp->flags & B44_FLAG_PAUSE_AUTO)
460                         adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
461
462                 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
463                         goto out;
464                 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
465                                                        BMCR_ANRESTART))) != 0)
466                         goto out;
467         } else {
468                 u32 bmcr;
469
470                 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
471                         goto out;
472                 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
473                 if (bp->flags & B44_FLAG_100_BASE_T)
474                         bmcr |= BMCR_SPEED100;
475                 if (bp->flags & B44_FLAG_FULL_DUPLEX)
476                         bmcr |= BMCR_FULLDPLX;
477                 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
478                         goto out;
479
480                 /* Since we will not be negotiating there is no safe way
481                  * to determine if the link partner supports flow control
482                  * or not.  So just disable it completely in this case.
483                  */
484                 b44_set_flow_ctrl(bp, 0, 0);
485         }
486
487 out:
488         return err;
489 }
490
491 static void b44_stats_update(struct b44 *bp)
492 {
493         unsigned long reg;
494         u32 *val;
495
496         val = &bp->hw_stats.tx_good_octets;
497         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
498                 *val++ += br32(bp, reg);
499         }
500
501         /* Pad */
502         reg += 8*4UL;
503
504         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
505                 *val++ += br32(bp, reg);
506         }
507 }
508
509 static void b44_link_report(struct b44 *bp)
510 {
511         if (!netif_carrier_ok(bp->dev)) {
512                 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
513         } else {
514                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
515                        bp->dev->name,
516                        (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
517                        (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
518
519                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
520                        "%s for RX.\n",
521                        bp->dev->name,
522                        (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
523                        (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
524         }
525 }
526
/* Poll the PHY and propagate link changes: mirror speed/duplex into
 * bp->flags, program MAC duplex, resolve flow control, and toggle the
 * netif carrier.  Called from the once-a-second timer with bp->lock
 * held.  bmsr == 0xffff means the PHY did not respond; skip entirely. */
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		/* Track the PHY-reported speed and duplex in our flags. */
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			/* Link came up: set MAC duplex to match the PHY
			 * and, unless the link is forced, resolve flow
			 * control from the negotiated registers. */
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		/* Report PHY-detected line problems. */
		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}
576
577 static void b44_timer(unsigned long __opaque)
578 {
579         struct b44 *bp = (struct b44 *) __opaque;
580
581         spin_lock_irq(&bp->lock);
582
583         b44_check_phy(bp);
584
585         b44_stats_update(bp);
586
587         spin_unlock_irq(&bp->lock);
588
589         bp->timer.expires = jiffies + HZ;
590         add_timer(&bp->timer);
591 }
592
/* Reclaim completed TX descriptors: everything between our consumer
 * index and the chip's current descriptor pointer has been sent, so
 * unmap and free those skbs, then wake the queue if enough space has
 * opened up.  Runs from the poll path with bp->lock held. */
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	/* Chip's current descriptor offset, converted to a ring index. */
	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	/* Restart the queue once a comfortable margin is free. */
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	/* NOTE(review): writing 0 to GPTIMER presumably cancels the
	 * general-purpose timeout that raised ISTAT_TO — confirm against
	 * the chip documentation. */
	bw32(bp, B44_GPTIMER, 0);
}
622
623 /* Works like this.  This chip writes a 'struct rx_header" 30 bytes
624  * before the DMA address you give it.  So we allocate 30 more bytes
625  * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
626  * point the chip at 30 bytes past where the rx_header will go.
627  */
628 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
629 {
630         struct dma_desc *dp;
631         struct ring_info *src_map, *map;
632         struct rx_header *rh;
633         struct sk_buff *skb;
634         dma_addr_t mapping;
635         int dest_idx;
636         u32 ctrl;
637
638         src_map = NULL;
639         if (src_idx >= 0)
640                 src_map = &bp->rx_buffers[src_idx];
641         dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
642         map = &bp->rx_buffers[dest_idx];
643         skb = dev_alloc_skb(RX_PKT_BUF_SZ);
644         if (skb == NULL)
645                 return -ENOMEM;
646
647         mapping = pci_map_single(bp->pdev, skb->data,
648                                  RX_PKT_BUF_SZ,
649                                  PCI_DMA_FROMDEVICE);
650
651         /* Hardware bug work-around, the chip is unable to do PCI DMA
652            to/from anything above 1GB :-( */
653         if (dma_mapping_error(mapping) ||
654                 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
655                 /* Sigh... */
656                 if (!dma_mapping_error(mapping))
657                         pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
658                 dev_kfree_skb_any(skb);
659                 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
660                 if (skb == NULL)
661                         return -ENOMEM;
662                 mapping = pci_map_single(bp->pdev, skb->data,
663                                          RX_PKT_BUF_SZ,
664                                          PCI_DMA_FROMDEVICE);
665                 if (dma_mapping_error(mapping) ||
666                         mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
667                         if (!dma_mapping_error(mapping))
668                                 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
669                         dev_kfree_skb_any(skb);
670                         return -ENOMEM;
671                 }
672         }
673
674         skb->dev = bp->dev;
675         skb_reserve(skb, bp->rx_offset);
676
677         rh = (struct rx_header *)
678                 (skb->data - bp->rx_offset);
679         rh->len = 0;
680         rh->flags = 0;
681
682         map->skb = skb;
683         pci_unmap_addr_set(map, mapping, mapping);
684
685         if (src_map != NULL)
686                 src_map->skb = NULL;
687
688         ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
689         if (dest_idx == (B44_RX_RING_SIZE - 1))
690                 ctrl |= DESC_CTRL_EOT;
691
692         dp = &bp->rx_ring[dest_idx];
693         dp->ctrl = cpu_to_le32(ctrl);
694         dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
695
696         if (bp->flags & B44_FLAG_RX_RING_HACK)
697                 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
698                                              dest_idx * sizeof(dp),
699                                              DMA_BIDIRECTIONAL);
700
701         return RX_PKT_BUF_SZ;
702 }
703
704 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
705 {
706         struct dma_desc *src_desc, *dest_desc;
707         struct ring_info *src_map, *dest_map;
708         struct rx_header *rh;
709         int dest_idx;
710         u32 ctrl;
711
712         dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
713         dest_desc = &bp->rx_ring[dest_idx];
714         dest_map = &bp->rx_buffers[dest_idx];
715         src_desc = &bp->rx_ring[src_idx];
716         src_map = &bp->rx_buffers[src_idx];
717
718         dest_map->skb = src_map->skb;
719         rh = (struct rx_header *) src_map->skb->data;
720         rh->len = 0;
721         rh->flags = 0;
722         pci_unmap_addr_set(dest_map, mapping,
723                            pci_unmap_addr(src_map, mapping));
724
725         if (bp->flags & B44_FLAG_RX_RING_HACK)
726                 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
727                                           src_idx * sizeof(src_desc),
728                                           DMA_BIDIRECTIONAL);
729
730         ctrl = src_desc->ctrl;
731         if (dest_idx == (B44_RX_RING_SIZE - 1))
732                 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
733         else
734                 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
735
736         dest_desc->ctrl = ctrl;
737         dest_desc->addr = src_desc->addr;
738
739         src_map->skb = NULL;
740
741         if (bp->flags & B44_FLAG_RX_RING_HACK)
742                 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
743                                              dest_idx * sizeof(dest_desc),
744                                              DMA_BIDIRECTIONAL);
745
746         pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
747                                        RX_PKT_BUF_SZ,
748                                        PCI_DMA_FROMDEVICE);
749 }
750
751 static int b44_rx(struct b44 *bp, int budget)
752 {
753         int received;
754         u32 cons, prod;
755
756         received = 0;
757         prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
758         prod /= sizeof(struct dma_desc);
759         cons = bp->rx_cons;
760
761         while (cons != prod && budget > 0) {
762                 struct ring_info *rp = &bp->rx_buffers[cons];
763                 struct sk_buff *skb = rp->skb;
764                 dma_addr_t map = pci_unmap_addr(rp, mapping);
765                 struct rx_header *rh;
766                 u16 len;
767
768                 pci_dma_sync_single_for_cpu(bp->pdev, map,
769                                             RX_PKT_BUF_SZ,
770                                             PCI_DMA_FROMDEVICE);
771                 rh = (struct rx_header *) skb->data;
772                 len = cpu_to_le16(rh->len);
773                 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
774                     (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
775                 drop_it:
776                         b44_recycle_rx(bp, cons, bp->rx_prod);
777                 drop_it_no_recycle:
778                         bp->stats.rx_dropped++;
779                         goto next_pkt;
780                 }
781
782                 if (len == 0) {
783                         int i = 0;
784
785                         do {
786                                 udelay(2);
787                                 barrier();
788                                 len = cpu_to_le16(rh->len);
789                         } while (len == 0 && i++ < 5);
790                         if (len == 0)
791                                 goto drop_it;
792                 }
793
794                 /* Omit CRC. */
795                 len -= 4;
796
797                 if (len > RX_COPY_THRESHOLD) {
798                         int skb_size;
799                         skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
800                         if (skb_size < 0)
801                                 goto drop_it;
802                         pci_unmap_single(bp->pdev, map,
803                                          skb_size, PCI_DMA_FROMDEVICE);
804                         /* Leave out rx_header */
805                         skb_put(skb, len+bp->rx_offset);
806                         skb_pull(skb,bp->rx_offset);
807                 } else {
808                         struct sk_buff *copy_skb;
809
810                         b44_recycle_rx(bp, cons, bp->rx_prod);
811                         copy_skb = dev_alloc_skb(len + 2);
812                         if (copy_skb == NULL)
813                                 goto drop_it_no_recycle;
814
815                         copy_skb->dev = bp->dev;
816                         skb_reserve(copy_skb, 2);
817                         skb_put(copy_skb, len);
818                         /* DMA sync done above, copy just the actual packet */
819                         memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
820
821                         skb = copy_skb;
822                 }
823                 skb->ip_summed = CHECKSUM_NONE;
824                 skb->protocol = eth_type_trans(skb, bp->dev);
825                 netif_receive_skb(skb);
826                 bp->dev->last_rx = jiffies;
827                 received++;
828                 budget--;
829         next_pkt:
830                 bp->rx_prod = (bp->rx_prod + 1) &
831                         (B44_RX_RING_SIZE - 1);
832                 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
833         }
834
835         bp->rx_cons = cons;
836         bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
837
838         return received;
839 }
840
/* Legacy NAPI poll handler.  Reclaims TX work, receives up to the
 * smaller of *budget and the device quota, and performs a full chip
 * reinit on error interrupts.  Returns 0 when all work is done (the
 * device leaves the poll list), 1 to be polled again. */
static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev_priv(netdev);
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		/* Respect both the global budget and this device's quota. */
		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		/* Whole allowance consumed: more RX work may remain. */
		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		/* Fatal error bits: halt and fully reinitialize the
		 * device, then let TX resume. */
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
		netif_wake_queue(bp->dev);
		spin_unlock_irq(&bp->lock);
		done = 1;
	}

	if (done) {
		/* Leave polling mode and re-enable chip interrupts. */
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	return (done ? 0 : 1);
}
889
/* Interrupt handler: hand all real work to NAPI (b44_poll) after
 * masking chip interrupts, then ack what was seen.
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The hardware mask register does not gate the status bits
         * reported in ISTAT, so masked-off sources must be filtered
         * out by hand before deciding whether this interrupt is ours.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                /* An interrupt can race with device teardown; just
                 * ack it without scheduling any work.
                 */
                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                /* Ack the sources we saw; the read flushes this (and
                 * the interrupt-disable above) out to the chip.
                 */
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}
933
/* dev->tx_timeout handler: the TX path stalled for longer than
 * B44_TX_TIMEOUT, so reset the chip, rebuild the rings and restart.
 */
static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp);

        spin_unlock_irq(&bp->lock);

        /* Interrupts are re-armed outside the lock, then the stack is
         * allowed to submit packets again.
         */
        b44_enable_ints(bp);

        netif_wake_queue(dev);
}
953
/* Queue one skb for transmission.  Returns NETDEV_TX_OK on success,
 * NETDEV_TX_BUSY when the ring is full or DMA mapping / bounce
 * allocation fails (the untouched skb is left for the stack to retry).
 */
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct sk_buff *bounce_skb;
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

                /* GFP_DMA keeps the bounce buffer within the chip's
                 * reachable address range.
                 */
                bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
                                             GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping,
                                         len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                /* Copy the payload into the bounce skb and transmit
                 * that instead of the original.
                 */
                memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        /* One descriptor per packet: start+end of frame, interrupt on
         * completion, end-of-table flag on the last ring slot.
         */
        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        /* The descriptor must be visible in memory before the chip
         * sees the new producer pointer.
         */
        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}
1042
1043 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1044 {
1045         struct b44 *bp = netdev_priv(dev);
1046
1047         if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1048                 return -EINVAL;
1049
1050         if (!netif_running(dev)) {
1051                 /* We'll just catch it later when the
1052                  * device is up'd.
1053                  */
1054                 dev->mtu = new_mtu;
1055                 return 0;
1056         }
1057
1058         spin_lock_irq(&bp->lock);
1059         b44_halt(bp);
1060         dev->mtu = new_mtu;
1061         b44_init_rings(bp);
1062         b44_init_hw(bp);
1063         spin_unlock_irq(&bp->lock);
1064
1065         b44_enable_ints(bp);
1066
1067         return 0;
1068 }
1069
1070 /* Free up pending packets in all rx/tx rings.
1071  *
1072  * The chip has been shut down and the driver detached from
1073  * the networking, so no interrupts or new tx packets will
1074  * end up in the driver.  bp->lock is not held and we are not
1075  * in an interrupt context and thus may sleep.
1076  */
1077 static void b44_free_rings(struct b44 *bp)
1078 {
1079         struct ring_info *rp;
1080         int i;
1081
1082         for (i = 0; i < B44_RX_RING_SIZE; i++) {
1083                 rp = &bp->rx_buffers[i];
1084
1085                 if (rp->skb == NULL)
1086                         continue;
1087                 pci_unmap_single(bp->pdev,
1088                                  pci_unmap_addr(rp, mapping),
1089                                  RX_PKT_BUF_SZ,
1090                                  PCI_DMA_FROMDEVICE);
1091                 dev_kfree_skb_any(rp->skb);
1092                 rp->skb = NULL;
1093         }
1094
1095         /* XXX needs changes once NETIF_F_SG is set... */
1096         for (i = 0; i < B44_TX_RING_SIZE; i++) {
1097                 rp = &bp->tx_buffers[i];
1098
1099                 if (rp->skb == NULL)
1100                         continue;
1101                 pci_unmap_single(bp->pdev,
1102                                  pci_unmap_addr(rp, mapping),
1103                                  rp->skb->len,
1104                                  PCI_DMA_TODEVICE);
1105                 dev_kfree_skb_any(rp->skb);
1106                 rp->skb = NULL;
1107         }
1108 }
1109
1110 /* Initialize tx/rx rings for packet processing.
1111  *
1112  * The chip has been shut down and the driver detached from
1113  * the networking, so no interrupts or new tx packets will
1114  * end up in the driver.
1115  */
1116 static void b44_init_rings(struct b44 *bp)
1117 {
1118         int i;
1119
1120         b44_free_rings(bp);
1121
1122         memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1123         memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1124
1125         if (bp->flags & B44_FLAG_RX_RING_HACK)
1126                 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1127                                            DMA_TABLE_BYTES,
1128                                            PCI_DMA_BIDIRECTIONAL);
1129
1130         if (bp->flags & B44_FLAG_TX_RING_HACK)
1131                 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1132                                            DMA_TABLE_BYTES,
1133                                            PCI_DMA_TODEVICE);
1134
1135         for (i = 0; i < bp->rx_pending; i++) {
1136                 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1137                         break;
1138         }
1139 }
1140
1141 /*
1142  * Must not be invoked with interrupt sources disabled and
1143  * the hardware shutdown down.
1144  */
1145 static void b44_free_consistent(struct b44 *bp)
1146 {
1147         kfree(bp->rx_buffers);
1148         bp->rx_buffers = NULL;
1149         kfree(bp->tx_buffers);
1150         bp->tx_buffers = NULL;
1151         if (bp->rx_ring) {
1152                 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1153                         dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1154                                          DMA_TABLE_BYTES,
1155                                          DMA_BIDIRECTIONAL);
1156                         kfree(bp->rx_ring);
1157                 } else
1158                         pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1159                                             bp->rx_ring, bp->rx_ring_dma);
1160                 bp->rx_ring = NULL;
1161                 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1162         }
1163         if (bp->tx_ring) {
1164                 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1165                         dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1166                                          DMA_TABLE_BYTES,
1167                                          DMA_TO_DEVICE);
1168                         kfree(bp->tx_ring);
1169                 } else
1170                         pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1171                                             bp->tx_ring, bp->tx_ring_dma);
1172                 bp->tx_ring = NULL;
1173                 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1174         }
1175 }
1176
1177 /*
1178  * Must not be invoked with interrupt sources disabled and
1179  * the hardware shutdown down.  Can sleep.
1180  */
1181 static int b44_alloc_consistent(struct b44 *bp)
1182 {
1183         int size;
1184
1185         size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1186         bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1187         if (!bp->rx_buffers)
1188                 goto out_err;
1189
1190         size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1191         bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1192         if (!bp->tx_buffers)
1193                 goto out_err;
1194
1195         size = DMA_TABLE_BYTES;
1196         bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1197         if (!bp->rx_ring) {
1198                 /* Allocation may have failed due to pci_alloc_consistent
1199                    insisting on use of GFP_DMA, which is more restrictive
1200                    than necessary...  */
1201                 struct dma_desc *rx_ring;
1202                 dma_addr_t rx_ring_dma;
1203
1204                 rx_ring = kzalloc(size, GFP_KERNEL);
1205                 if (!rx_ring)
1206                         goto out_err;
1207
1208                 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1209                                              DMA_TABLE_BYTES,
1210                                              DMA_BIDIRECTIONAL);
1211
1212                 if (dma_mapping_error(rx_ring_dma) ||
1213                         rx_ring_dma + size > B44_DMA_MASK) {
1214                         kfree(rx_ring);
1215                         goto out_err;
1216                 }
1217
1218                 bp->rx_ring = rx_ring;
1219                 bp->rx_ring_dma = rx_ring_dma;
1220                 bp->flags |= B44_FLAG_RX_RING_HACK;
1221         }
1222
1223         bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1224         if (!bp->tx_ring) {
1225                 /* Allocation may have failed due to pci_alloc_consistent
1226                    insisting on use of GFP_DMA, which is more restrictive
1227                    than necessary...  */
1228                 struct dma_desc *tx_ring;
1229                 dma_addr_t tx_ring_dma;
1230
1231                 tx_ring = kzalloc(size, GFP_KERNEL);
1232                 if (!tx_ring)
1233                         goto out_err;
1234
1235                 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1236                                              DMA_TABLE_BYTES,
1237                                              DMA_TO_DEVICE);
1238
1239                 if (dma_mapping_error(tx_ring_dma) ||
1240                         tx_ring_dma + size > B44_DMA_MASK) {
1241                         kfree(tx_ring);
1242                         goto out_err;
1243                 }
1244
1245                 bp->tx_ring = tx_ring;
1246                 bp->tx_ring_dma = tx_ring_dma;
1247                 bp->flags |= B44_FLAG_TX_RING_HACK;
1248         }
1249
1250         return 0;
1251
1252 out_err:
1253         b44_free_consistent(bp);
1254         return -ENOMEM;
1255 }
1256
1257 /* bp->lock is held. */
1258 static void b44_clear_stats(struct b44 *bp)
1259 {
1260         unsigned long reg;
1261
1262         bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1263         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1264                 br32(bp, reg);
1265         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1266                 br32(bp, reg);
1267 }
1268
/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        /* If the core is already up, stop the MAC and both DMA engines
         * in an orderly fashion before resetting.
         */
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                /* Wait for the RX engine to go idle if it reports an
                 * active/error state.
                 */
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        /* Flush (and zero) the MIB counters after the reset. */
        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        /* DEVCTRL_IPP distinguishes internal from external PHY. */
        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                /* Clear the EPR bit if it is set, then give the PHY
                 * time to settle.
                 */
                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}
1314
/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        /* Quiesce the device: mask interrupts first, then fully reset
         * the core so no DMA remains in flight.
         */
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}
1321
1322 /* bp->lock is held. */
1323 static void __b44_set_mac_addr(struct b44 *bp)
1324 {
1325         bw32(bp, B44_CAM_CTRL, 0);
1326         if (!(bp->dev->flags & IFF_PROMISC)) {
1327                 u32 val;
1328
1329                 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1330                 val = br32(bp, B44_CAM_CTRL);
1331                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1332         }
1333 }
1334
1335 static int b44_set_mac_addr(struct net_device *dev, void *p)
1336 {
1337         struct b44 *bp = netdev_priv(dev);
1338         struct sockaddr *addr = p;
1339
1340         if (netif_running(dev))
1341                 return -EBUSY;
1342
1343         if (!is_valid_ether_addr(addr->sa_data))
1344                 return -EINVAL;
1345
1346         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1347
1348         spin_lock_irq(&bp->lock);
1349         __b44_set_mac_addr(bp);
1350         spin_unlock_irq(&bp->lock);
1351
1352         return 0;
1353 }
1354
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp)
{
        u32 val;

        /* Full reset, then bring the PHY back up. */
        b44_chip_reset(bp);
        b44_phy_reset(bp);
        b44_setup_phy(bp);

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */

        /* Point both DMA engines at their descriptor tables and enable
         * them; RX descriptors carry the rx_offset header room.
         */
        bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
        bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
        bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                              (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
        bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

        /* Publish the RX producer index to the chip. */
        bw32(bp, B44_DMARX_PTR, bp->rx_pending);
        bp->rx_prod = bp->rx_pending;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

        /* Finally let the MAC run. */
        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
1393
/* dev->open handler: allocate rings, program the chip, hook the IRQ
 * and start the periodic PHY check timer.
 */
static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp);
        if (err)
                goto out;

        b44_init_rings(bp);
        b44_init_hw(bp);

        b44_check_phy(bp);

        /* The chip was brought up before the IRQ hookup, so undo the
         * hardware setup if request_irq() fails.
         */
        err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
        if (unlikely(err < 0)) {
                b44_chip_reset(bp);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        /* Re-check PHY/link state once a second via b44_timer. */
        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}
1427
#if 0
/* Debug-only helper (compiled out): dump the PCI status word.  The
 * extra val32* locals are unused placeholders for more register dumps.
 */
/*static*/ void b44_dump_state(struct b44 *bp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;

        pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
        printk("DEBUG: PCI status [%04x] \n", val16);

}
#endif
1439
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        /* Invoke the handler by hand; it never touches pt_regs, so
         * NULL is fine there.
         */
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev, NULL);
        enable_irq(dev->irq);
}
#endif
1452
1453
/* Arm the chip for Wake-on-LAN before power-down.  Called from
 * b44_close() after b44_init_hw() with the interface otherwise idle.
 */
static void b44_setup_wol(struct b44 *bp)
{
        u32 val;
        u16 pmval;

        /* Accept all multicast while in the wake-up state. */
        bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

        if (bp->flags & B44_FLAG_B0_ANDLATER) {

                /* B0 and later cores: disable wake-up pattern length
                 * matching, load the station address, and set the
                 * MPM/PFE bits in DEVCTRL.
                 */
                bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

                val = bp->dev->dev_addr[2] << 24 |
                        bp->dev->dev_addr[3] << 16 |
                        bp->dev->dev_addr[4] << 8 |
                        bp->dev->dev_addr[5];
                bw32(bp, B44_ADDR_LO, val);

                val = bp->dev->dev_addr[0] << 8 |
                        bp->dev->dev_addr[1];
                bw32(bp, B44_ADDR_HI, val);

                val = br32(bp, B44_DEVCTRL);
                bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

        }
        /* NOTE(review): pre-B0 cores get no pattern programming here --
         * confirm whether pseudo-magic-packet setup is required for them.
         */

        /* Assert the core's PE bit and the PME enable bit in the PCI
         * power-management control/status register.
         */
        val = br32(bp, B44_SBTMSLOW);
        bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);

        pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
        pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);

}
1487
/* dev->stop handler: quiesce NAPI and the timer, halt the chip and
 * release resources.  If WOL is enabled, the chip is re-initialized
 * afterwards and left armed for wake-up.
 */
static int b44_close(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        netif_stop_queue(dev);

        /* Ensure no b44_poll() is in flight before tearing down. */
        netif_poll_disable(dev);

        del_timer_sync(&bp->timer);

        spin_lock_irq(&bp->lock);

#if 0
        b44_dump_state(bp);
#endif
        b44_halt(bp);
        b44_free_rings(bp);
        netif_carrier_off(dev);

        spin_unlock_irq(&bp->lock);

        free_irq(dev->irq, dev);

        netif_poll_enable(dev);

        if (bp->flags & B44_FLAG_WOL_ENABLE) {
                b44_init_hw(bp);
                b44_setup_wol(bp);
        }

        b44_free_consistent(bp);

        return 0;
}
1522
/* dev->get_stats handler: translate the driver's copy of the hardware
 * MIB counters into generic netdevice statistics.
 */
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct net_device_stats *nstat = &bp->stats;
        struct b44_hw_stats *hwstat = &bp->hw_stats;

        /* Convert HW stats into netdevice stats. */
        nstat->rx_packets = hwstat->rx_pkts;
        nstat->tx_packets = hwstat->tx_pkts;
        nstat->rx_bytes   = hwstat->rx_octets;
        nstat->tx_bytes   = hwstat->tx_octets;
        nstat->tx_errors  = (hwstat->tx_jabber_pkts +
                             hwstat->tx_oversize_pkts +
                             hwstat->tx_underruns +
                             hwstat->tx_excessive_cols +
                             hwstat->tx_late_cols);
        /* NOTE(review): multicast is sourced from the TX multicast
         * counter -- confirm the RX counter isn't intended here.
         */
        nstat->multicast  = hwstat->tx_multicast_pkts;
        nstat->collisions = hwstat->tx_total_cols;

        nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
                                   hwstat->rx_undersize);
        nstat->rx_over_errors   = hwstat->rx_missed_pkts;
        nstat->rx_frame_errors  = hwstat->rx_align_errs;
        nstat->rx_crc_errors    = hwstat->rx_crc_errs;
        nstat->rx_errors        = (hwstat->rx_jabber_pkts +
                                   hwstat->rx_oversize_pkts +
                                   hwstat->rx_missed_pkts +
                                   hwstat->rx_crc_align_errs +
                                   hwstat->rx_undersize +
                                   hwstat->rx_crc_errs +
                                   hwstat->rx_align_errs +
                                   hwstat->rx_symbol_errs);

        nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
        /* Carrier lost counter seems to be broken for some devices */
        nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

        return nstat;
}
1564
1565 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1566 {
1567         struct dev_mc_list *mclist;
1568         int i, num_ents;
1569
1570         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1571         mclist = dev->mc_list;
1572         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1573                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1574         }
1575         return i+1;
1576 }
1577
/* Program promiscuous / multicast filtering.  Callers hold bp->lock. */
static void __b44_set_rx_mode(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        u32 val;

        val = br32(bp, B44_RXCONFIG);
        val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
        if (dev->flags & IFF_PROMISC) {
                val |= RXCONFIG_PROMISC;
                bw32(bp, B44_RXCONFIG, val);
        } else {
                unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
                int i = 0;

                /* Slot 0: our unicast address (re-disables the CAM). */
                __b44_set_mac_addr(bp);

                if (dev->flags & IFF_ALLMULTI)
                        val |= RXCONFIG_ALLMULTI;
                else
                        i = __b44_load_mcast(bp, dev);

                /* Zero every remaining CAM slot so stale entries can
                 * no longer match.
                 */
                for (; i < 64; i++) {
                        __b44_cam_write(bp, zero, i);
                }
                bw32(bp, B44_RXCONFIG, val);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}
1607
/* dev->set_multicast_list handler: locked wrapper for
 * __b44_set_rx_mode().
 */
static void b44_set_rx_mode(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        spin_lock_irq(&bp->lock);
        __b44_set_rx_mode(dev);
        spin_unlock_irq(&bp->lock);
}
1616
1617 static u32 b44_get_msglevel(struct net_device *dev)
1618 {
1619         struct b44 *bp = netdev_priv(dev);
1620         return bp->msg_enable;
1621 }
1622
1623 static void b44_set_msglevel(struct net_device *dev, u32 value)
1624 {
1625         struct b44 *bp = netdev_priv(dev);
1626         bp->msg_enable = value;
1627 }
1628
1629 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1630 {
1631         struct b44 *bp = netdev_priv(dev);
1632         struct pci_dev *pci_dev = bp->pdev;
1633
1634         strcpy (info->driver, DRV_MODULE_NAME);
1635         strcpy (info->version, DRV_MODULE_VERSION);
1636         strcpy (info->bus_info, pci_name(pci_dev));
1637 }
1638
/* ethtool nway_reset: restart autonegotiation.  Returns -EINVAL when
 * autoneg is not currently enabled on the PHY.
 */
static int b44_nway_reset(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        u32 bmcr;
        int r;

        spin_lock_irq(&bp->lock);
        /* NOTE(review): BMCR is read twice -- presumably to discard a
         * stale first read; TODO confirm the intent.
         */
        b44_readphy(bp, MII_BMCR, &bmcr);
        b44_readphy(bp, MII_BMCR, &bmcr);
        r = -EINVAL;
        if (bmcr & BMCR_ANENABLE) {
                /* Autoneg is on: kick off a renegotiation. */
                b44_writephy(bp, MII_BMCR,
                             bmcr | BMCR_ANRESTART);
                r = 0;
        }
        spin_unlock_irq(&bp->lock);

        return r;
}
1658
/* ethtool get_settings: report link capabilities and current state,
 * derived from the mode bits cached in bp->flags.
 */
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct b44 *bp = netdev_priv(dev);

        /* The chip is a 10/100 MII device; the supported set is fixed. */
        cmd->supported = (SUPPORTED_Autoneg);
        cmd->supported |= (SUPPORTED_100baseT_Half |
                          SUPPORTED_100baseT_Full |
                          SUPPORTED_10baseT_Half |
                          SUPPORTED_10baseT_Full |
                          SUPPORTED_MII);

        /* Advertised modes come from driver flags, not a PHY readback. */
        cmd->advertising = 0;
        if (bp->flags & B44_FLAG_ADV_10HALF)
                cmd->advertising |= ADVERTISED_10baseT_Half;
        if (bp->flags & B44_FLAG_ADV_10FULL)
                cmd->advertising |= ADVERTISED_10baseT_Full;
        if (bp->flags & B44_FLAG_ADV_100HALF)
                cmd->advertising |= ADVERTISED_100baseT_Half;
        if (bp->flags & B44_FLAG_ADV_100FULL)
                cmd->advertising |= ADVERTISED_100baseT_Full;
        cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
        cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
                SPEED_100 : SPEED_10;
        cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
                DUPLEX_FULL : DUPLEX_HALF;
        cmd->port = 0;
        cmd->phy_address = bp->phy_addr;
        cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
                XCVR_INTERNAL : XCVR_EXTERNAL;
        cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
                AUTONEG_DISABLE : AUTONEG_ENABLE;
        if (cmd->autoneg == AUTONEG_ENABLE)
                cmd->advertising |= ADVERTISED_Autoneg;
        /* With the interface down there is no meaningful link state. */
        if (!netif_running(dev)){
                cmd->speed = 0;
                cmd->duplex = 0xff;
        }
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
1700
/* ethtool set_settings: validate the request, update the mode flags in
 * bp->flags and, if the interface is up, reprogram the PHY.
 */
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct b44 *bp = netdev_priv(dev);

        /* We do not support gigabit. */
        if (cmd->autoneg == AUTONEG_ENABLE) {
                if (cmd->advertising &
                    (ADVERTISED_1000baseT_Half |
                     ADVERTISED_1000baseT_Full))
                        return -EINVAL;
        } else if ((cmd->speed != SPEED_100 &&
                    cmd->speed != SPEED_10) ||
                   (cmd->duplex != DUPLEX_HALF &&
                    cmd->duplex != DUPLEX_FULL)) {
                        return -EINVAL;
        }

        spin_lock_irq(&bp->lock);

        if (cmd->autoneg == AUTONEG_ENABLE) {
                /* Clear any forced mode, then rebuild the advertising
                 * flags; an empty mask means advertise everything.
                 */
                bp->flags &= ~(B44_FLAG_FORCE_LINK |
                               B44_FLAG_100_BASE_T |
                               B44_FLAG_FULL_DUPLEX |
                               B44_FLAG_ADV_10HALF |
                               B44_FLAG_ADV_10FULL |
                               B44_FLAG_ADV_100HALF |
                               B44_FLAG_ADV_100FULL);
                if (cmd->advertising == 0) {
                        bp->flags |= (B44_FLAG_ADV_10HALF |
                                      B44_FLAG_ADV_10FULL |
                                      B44_FLAG_ADV_100HALF |
                                      B44_FLAG_ADV_100FULL);
                } else {
                        if (cmd->advertising & ADVERTISED_10baseT_Half)
                                bp->flags |= B44_FLAG_ADV_10HALF;
                        if (cmd->advertising & ADVERTISED_10baseT_Full)
                                bp->flags |= B44_FLAG_ADV_10FULL;
                        if (cmd->advertising & ADVERTISED_100baseT_Half)
                                bp->flags |= B44_FLAG_ADV_100HALF;
                        if (cmd->advertising & ADVERTISED_100baseT_Full)
                                bp->flags |= B44_FLAG_ADV_100FULL;
                }
        } else {
                /* Forced speed/duplex. */
                bp->flags |= B44_FLAG_FORCE_LINK;
                bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
                if (cmd->speed == SPEED_100)
                        bp->flags |= B44_FLAG_100_BASE_T;
                if (cmd->duplex == DUPLEX_FULL)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
        }

        /* Apply immediately when the interface is live; otherwise the
         * flags take effect at the next open.
         */
        if (netif_running(dev))
                b44_setup_phy(bp);

        spin_unlock_irq(&bp->lock);

        return 0;
}
1759
1760 static void b44_get_ringparam(struct net_device *dev,
1761                               struct ethtool_ringparam *ering)
1762 {
1763         struct b44 *bp = netdev_priv(dev);
1764
1765         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1766         ering->rx_pending = bp->rx_pending;
1767
1768         /* XXX ethtool lacks a tx_max_pending, oops... */
1769 }
1770
/* ethtool set_ringparam handler: change RX/TX ring depths.
 *
 * Validates the requested sizes against the hardware ring limits (mini
 * and jumbo rings are not supported), then restarts the chip so the
 * new ring geometry takes effect.  Returns 0 or -EINVAL.
 */
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	/* Full restart: stop the chip, rebuild the rings with the new
	 * depths, and bring the hardware back up.
	 */
	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	/* Re-arm interrupts only after the lock is dropped. */
	b44_enable_ints(bp);

	return 0;
}
1797
1798 static void b44_get_pauseparam(struct net_device *dev,
1799                                 struct ethtool_pauseparam *epause)
1800 {
1801         struct b44 *bp = netdev_priv(dev);
1802
1803         epause->autoneg =
1804                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1805         epause->rx_pause =
1806                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1807         epause->tx_pause =
1808                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1809 }
1810
/* ethtool set_pauseparam handler: update RX/TX pause configuration.
 *
 * If pause autonegotiation is (now) enabled the chip is fully
 * restarted so the PHY renegotiates; otherwise the flow-control
 * registers are reprogrammed directly.  Always returns 0.
 */
static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		/* Autoneg path: restart the chip so the new pause
		 * advertisement is renegotiated from scratch.
		 */
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
	} else {
		/* Forced path: just reprogram the flow-control setup. */
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	/* Re-arm interrupts outside the lock. */
	b44_enable_ints(bp);

	return 0;
}
1842
1843 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1844 {
1845         switch(stringset) {
1846         case ETH_SS_STATS:
1847                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1848                 break;
1849         }
1850 }
1851
/* ethtool get_stats_count handler: one counter per name in
 * b44_gstrings.
 */
static int b44_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(b44_gstrings);
}
1856
/* ethtool get_ethtool_stats handler: refresh the hardware counters
 * and copy them out as u64 values.
 *
 * NOTE(review): the pointer walk below assumes bp->hw_stats is laid
 * out as ARRAY_SIZE(b44_gstrings) consecutive u32 counters starting
 * at tx_good_octets, in the same order as b44_gstrings — verify
 * against the struct definition in b44.h if either side changes.
 */
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	/* Pull the latest counter values from the chip first. */
	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}
1873
1874 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1875 {
1876         struct b44 *bp = netdev_priv(dev);
1877
1878         wol->supported = WAKE_MAGIC;
1879         if (bp->flags & B44_FLAG_WOL_ENABLE)
1880                 wol->wolopts = WAKE_MAGIC;
1881         else
1882                 wol->wolopts = 0;
1883         memset(&wol->sopass, 0, sizeof(wol->sopass));
1884 }
1885
1886 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1887 {
1888         struct b44 *bp = netdev_priv(dev);
1889
1890         spin_lock_irq(&bp->lock);
1891         if (wol->wolopts & WAKE_MAGIC)
1892                 bp->flags |= B44_FLAG_WOL_ENABLE;
1893         else
1894                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
1895         spin_unlock_irq(&bp->lock);
1896
1897         return 0;
1898 }
1899
/* ethtool operations table wired up in b44_init_one(). */
static struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_stats_count	= b44_get_stats_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
1919
1920 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1921 {
1922         struct mii_ioctl_data *data = if_mii(ifr);
1923         struct b44 *bp = netdev_priv(dev);
1924         int err = -EINVAL;
1925
1926         if (!netif_running(dev))
1927                 goto out;
1928
1929         spin_lock_irq(&bp->lock);
1930         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1931         spin_unlock_irq(&bp->lock);
1932 out:
1933         return err;
1934 }
1935
1936 /* Read 128-bytes of EEPROM. */
1937 static int b44_read_eeprom(struct b44 *bp, u8 *data)
1938 {
1939         long i;
1940         u16 *ptr = (u16 *) data;
1941
1942         for (i = 0; i < 128; i += 2)
1943                 ptr[i / 2] = readw(bp->regs + 4096 + i);
1944
1945         return 0;
1946 }
1947
/* Probe-time setup: read fixed chip parameters from the EEPROM and
 * initialize the software state that never changes afterwards
 * (MAC address, PHY address, RX offset, interrupt mask, SSB core
 * info).  Returns 0 on success or a negative errno.
 */
static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

	/* MAC address bytes are stored pairwise-swapped in the EEPROM;
	 * note the [79],[78],[81],[80],... indexing.
	 * NOTE(review): EEPROM offsets (78..83, 90) assumed per the
	 * Broadcom 4400 layout — confirm against the datasheet.
	 */
	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	/* Remember the factory address for ethtool's get_perm_addr. */
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->phy_addr = eeprom[90] & 0x1f;

	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.
	 */
	bp->rx_offset = 30;

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = SB_PCI_DMA;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	 */

	/* Core revision 7 and later ("B0 and later" silicon). */
	if (ssb_get_core_rev(bp) >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

out:
	return err;
}
1993
1994 static int __devinit b44_init_one(struct pci_dev *pdev,
1995                                   const struct pci_device_id *ent)
1996 {
1997         static int b44_version_printed = 0;
1998         unsigned long b44reg_base, b44reg_len;
1999         struct net_device *dev;
2000         struct b44 *bp;
2001         int err, i;
2002
2003         if (b44_version_printed++ == 0)
2004                 printk(KERN_INFO "%s", version);
2005
2006         err = pci_enable_device(pdev);
2007         if (err) {
2008                 printk(KERN_ERR PFX "Cannot enable PCI device, "
2009                        "aborting.\n");
2010                 return err;
2011         }
2012
2013         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2014                 printk(KERN_ERR PFX "Cannot find proper PCI device "
2015                        "base address, aborting.\n");
2016                 err = -ENODEV;
2017                 goto err_out_disable_pdev;
2018         }
2019
2020         err = pci_request_regions(pdev, DRV_MODULE_NAME);
2021         if (err) {
2022                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
2023                        "aborting.\n");
2024                 goto err_out_disable_pdev;
2025         }
2026
2027         pci_set_master(pdev);
2028
2029         err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2030         if (err) {
2031                 printk(KERN_ERR PFX "No usable DMA configuration, "
2032                        "aborting.\n");
2033                 goto err_out_free_res;
2034         }
2035
2036         err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2037         if (err) {
2038                 printk(KERN_ERR PFX "No usable DMA configuration, "
2039                        "aborting.\n");
2040                 goto err_out_free_res;
2041         }
2042
2043         b44reg_base = pci_resource_start(pdev, 0);
2044         b44reg_len = pci_resource_len(pdev, 0);
2045
2046         dev = alloc_etherdev(sizeof(*bp));
2047         if (!dev) {
2048                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
2049                 err = -ENOMEM;
2050                 goto err_out_free_res;
2051         }
2052
2053         SET_MODULE_OWNER(dev);
2054         SET_NETDEV_DEV(dev,&pdev->dev);
2055
2056         /* No interesting netdevice features in this card... */
2057         dev->features |= 0;
2058
2059         bp = netdev_priv(dev);
2060         bp->pdev = pdev;
2061         bp->dev = dev;
2062
2063         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2064
2065         spin_lock_init(&bp->lock);
2066
2067         bp->regs = ioremap(b44reg_base, b44reg_len);
2068         if (bp->regs == 0UL) {
2069                 printk(KERN_ERR PFX "Cannot map device registers, "
2070                        "aborting.\n");
2071                 err = -ENOMEM;
2072                 goto err_out_free_dev;
2073         }
2074
2075         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2076         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2077
2078         dev->open = b44_open;
2079         dev->stop = b44_close;
2080         dev->hard_start_xmit = b44_start_xmit;
2081         dev->get_stats = b44_get_stats;
2082         dev->set_multicast_list = b44_set_rx_mode;
2083         dev->set_mac_address = b44_set_mac_addr;
2084         dev->do_ioctl = b44_ioctl;
2085         dev->tx_timeout = b44_tx_timeout;
2086         dev->poll = b44_poll;
2087         dev->weight = 64;
2088         dev->watchdog_timeo = B44_TX_TIMEOUT;
2089 #ifdef CONFIG_NET_POLL_CONTROLLER
2090         dev->poll_controller = b44_poll_controller;
2091 #endif
2092         dev->change_mtu = b44_change_mtu;
2093         dev->irq = pdev->irq;
2094         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2095
2096         netif_carrier_off(dev);
2097
2098         err = b44_get_invariants(bp);
2099         if (err) {
2100                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
2101                        "aborting.\n");
2102                 goto err_out_iounmap;
2103         }
2104
2105         bp->mii_if.dev = dev;
2106         bp->mii_if.mdio_read = b44_mii_read;
2107         bp->mii_if.mdio_write = b44_mii_write;
2108         bp->mii_if.phy_id = bp->phy_addr;
2109         bp->mii_if.phy_id_mask = 0x1f;
2110         bp->mii_if.reg_num_mask = 0x1f;
2111
2112         /* By default, advertise all speed/duplex settings. */
2113         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2114                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2115
2116         /* By default, auto-negotiate PAUSE. */
2117         bp->flags |= B44_FLAG_PAUSE_AUTO;
2118
2119         err = register_netdev(dev);
2120         if (err) {
2121                 printk(KERN_ERR PFX "Cannot register net device, "
2122                        "aborting.\n");
2123                 goto err_out_iounmap;
2124         }
2125
2126         pci_set_drvdata(pdev, dev);
2127
2128         pci_save_state(bp->pdev);
2129
2130         /* Chip reset provides power to the b44 MAC & PCI cores, which
2131          * is necessary for MAC register access.
2132          */
2133         b44_chip_reset(bp);
2134
2135         printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2136         for (i = 0; i < 6; i++)
2137                 printk("%2.2x%c", dev->dev_addr[i],
2138                        i == 5 ? '\n' : ':');
2139
2140         return 0;
2141
2142 err_out_iounmap:
2143         iounmap(bp->regs);
2144
2145 err_out_free_dev:
2146         free_netdev(dev);
2147
2148 err_out_free_res:
2149         pci_release_regions(pdev);
2150
2151 err_out_disable_pdev:
2152         pci_disable_device(pdev);
2153         pci_set_drvdata(pdev, NULL);
2154         return err;
2155 }
2156
2157 static void __devexit b44_remove_one(struct pci_dev *pdev)
2158 {
2159         struct net_device *dev = pci_get_drvdata(pdev);
2160         struct b44 *bp = netdev_priv(dev);
2161
2162         unregister_netdev(dev);
2163         iounmap(bp->regs);
2164         free_netdev(dev);
2165         pci_release_regions(pdev);
2166         pci_disable_device(pdev);
2167         pci_set_drvdata(pdev, NULL);
2168 }
2169
/* PCI suspend handler: quiesce a running interface and, if Wake-on-LAN
 * was requested via ethtool, re-arm the chip for magic-packet wakeup
 * before powering down.  Returns 0.
 */
static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		 return 0;

	/* Stop the periodic PHY timer before touching the hardware. */
	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		/* Bring the MAC back up just enough to arm WOL. */
		b44_init_hw(bp);
		b44_setup_wol(bp);
	}
	pci_disable_device(pdev);
	return 0;
}
2197
2198 static int b44_resume(struct pci_dev *pdev)
2199 {
2200         struct net_device *dev = pci_get_drvdata(pdev);
2201         struct b44 *bp = netdev_priv(dev);
2202
2203         pci_restore_state(pdev);
2204         pci_enable_device(pdev);
2205         pci_set_master(pdev);
2206
2207         if (!netif_running(dev))
2208                 return 0;
2209
2210         if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2211                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2212
2213         spin_lock_irq(&bp->lock);
2214
2215         b44_init_rings(bp);
2216         b44_init_hw(bp);
2217         netif_device_attach(bp->dev);
2218         spin_unlock_irq(&bp->lock);
2219
2220         bp->timer.expires = jiffies + HZ;
2221         add_timer(&bp->timer);
2222
2223         b44_enable_ints(bp);
2224         netif_wake_queue(dev);
2225         return 0;
2226 }
2227
/* PCI driver registration table; b44_pci_tbl lists the supported
 * Broadcom 4400 device IDs.
 */
static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
2236
2237 static int __init b44_init(void)
2238 {
2239         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2240
2241         /* Setup paramaters for syncing RX/TX DMA descriptors */
2242         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2243         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2244
2245         return pci_module_init(&b44_driver);
2246 }
2247
/* Module exit point: unregister the PCI driver, which removes all
 * bound devices via b44_remove_one().
 */
static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}
2252
/* Register the module entry/exit points. */
module_init(b44_init);
module_exit(b44_cleanup);
2255