[ETH]: Make eth_type_trans set skb->dev like the other *_type_trans
drivers/net/b44.c
/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.01"
#define DRV_MODULE_RELDATE      "Jun 16, 2006"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)

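/* TX ring bookkeeping: only tx_pending of the B44_TX_RING_SIZE entries
 * are used; TX_BUFFS_AVAIL computes how many of those are still free,
 * accounting for producer/consumer wrap-around.
 */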
#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

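/* RX buffers hold a full 1536-byte frame plus the chip-written
 * struct rx_header (bp->rx_offset bytes) and 64 bytes of slack.
 */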
#define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ           (B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { }     /* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

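/* Build the ethtool statistics name table by stringifying the register
 * list declared by B44_STAT_REG_DECLARE in b44.h.
 */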
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(&pdev->dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround.  */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
        u32 val = br32(bp, B44_SBADMATCH0);
        u32 base, type;

        type = val & SBADMATCH0_TYPE_MASK;
        switch (type) {
        case 0:
                base = val & SBADMATCH0_BS0_MASK;
                break;

        case 1:
                base = val & SBADMATCH0_BS1_MASK;
                break;

        case 2:
        default:
                base = val & SBADMATCH0_BS2_MASK;
                break;
        }
#endif
        return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

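/* Load one MAC address into the receive CAM at the given index: the
 * last four address bytes go in CAM_DATA_LO, the first two in
 * CAM_DATA_HI, then the entry is committed with a CAM write command.
 */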
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

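/* PHY register access goes through the MDIO interface: clear the MII
 * interrupt status, write a framed MDIO command, then wait for the
 * completion bit.
 */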
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = b44_readphy(bp, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad: the registers between the TX and RX stat blocks are
         * reserved; the loop below reinitializes reg in any case. */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);
}

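/* Reclaim TX descriptors the chip has finished with, unmapping and
 * freeing their skbs, then wake the queue if enough space freed up.
 */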
static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = dev_alloc_skb(RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(mapping) ||
                mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = pci_map_single(bp->pdev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(mapping) ||
                        mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        skb->dev = bp->dev;
        skb_reserve(skb, bp->rx_offset);

        rh = (struct rx_header *)
                (skb->data - bp->rx_offset);
        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        __le32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
                                       RX_PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
}

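/* RX path: walk the descriptors the chip has completed, validate the
 * chip-written rx_header, and either pass the skb up directly (large
 * packets) or copy into a fresh skb and recycle the buffer (small ones).
 */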
static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = pci_unmap_addr(rp, mapping);
                struct rx_header *rh;
                u16 len;

                pci_dma_sync_single_for_cpu(bp->pdev, map,
                                            RX_PKT_BUF_SZ,
                                            PCI_DMA_FROMDEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        pci_unmap_single(bp->pdev, map,
                                         skb_size, PCI_DMA_FROMDEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + bp->rx_offset);
                        skb_pull(skb, bp->rx_offset);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

static int b44_poll(struct net_device *netdev, int *budget)
{
        struct b44 *bp = netdev_priv(netdev);
        int done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        done = 1;
        if (bp->istat & ISTAT_RX) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = b44_rx(bp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;

                if (work_done >= orig_budget)
                        done = 0;
        }

        if (bp->istat & ISTAT_ERRORS) {
                unsigned long flags;

                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                done = 1;
        }

        if (done) {
                netif_rx_complete(netdev);
                b44_enable_ints(bp);
        }

        return (done ? 0 : 1);
}

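/* Interrupt handler: latch and ack the interrupt status, then defer the
 * real work to the NAPI poll routine with chip interrupts masked.
 */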
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct sk_buff *bounce_skb;
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

                bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
                                             GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping,
                                         len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                memcpy(skb_put(bounce_skb, len), skb->data, len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 rp->skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           PCI_DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           PCI_DMA_TODEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(rx_ring_dma) ||
                        rx_ring_dma + size > DMA_30BIT_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, GFP_KERNEL);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(tx_ring_dma) ||
                        tx_ring_dma + size > DMA_30BIT_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);
        __b44_set_mac_addr(bp);
        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp);
        if (err)
                goto out;

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                b44_chip_reset(bp);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;

        pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
        printk("DEBUG: PCI status [%04x]\n", val16);

}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

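/* Write a pattern or mask table into the chip's internal filter RAM,
 * one 32-bit word at a time via the FILT_ADDR/FILT_DATA registers.
 */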
static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

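/* Build a Wake-on-LAN magic packet match at the given offset: six 0xff
 * sync bytes followed by repetitions of the MAC address, setting the
 * corresponding bits in the byte mask.  Returns one less than the
 * pattern length, as the WKUP_LEN register expects.
 */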
1486 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1487 {
1488         int magicsync = 6;
1489         int k, j, len = offset;
1490         int ethaddr_bytes = ETH_ALEN;
1491
1492         memset(ppattern + offset, 0xff, magicsync);
1493         for (j = 0; j < magicsync; j++)
1494                 set_bit(len++, (unsigned long *) pmask);
1495
1496         for (j = 0; j < B44_MAX_PATTERNS; j++) {
1497                 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1498                         ethaddr_bytes = ETH_ALEN;
1499                 else
1500                         ethaddr_bytes = B44_PATTERN_SIZE - len;
1501                 if (ethaddr_bytes <=0)
1502                         break;
1503                 for (k = 0; k< ethaddr_bytes; k++) {
1504                         ppattern[offset + magicsync +
1505                                 (j * ETH_ALEN) + k] = macaddr[k];
1506                         len++;
1507                         set_bit(len, (unsigned long *) pmask);
1508                 }
1509         }
1510         return len - 1;
1511 }
1512
1513 /* Setup magic packet patterns in the b44 WOL
1514  * pattern matching filter.
1515  */
1516 static void b44_setup_pseudo_magicp(struct b44 *bp)
1517 {
1518
1519         u32 val;
1520         int plen0, plen1, plen2;
1521         u8 *pwol_pattern;
1522         u8 pwol_mask[B44_PMASK_SIZE];
1523
1524         pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1525         if (!pwol_pattern) {
1526                 printk(KERN_ERR PFX "Memory not available for WOL\n");
1527                 return;
1528         }
1529
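        /* The three patterns below are written to consecutive
         * B44_PATTERN_SIZE/B44_PMASK_SIZE slots, placing the magic
         * payload where it would sit in an IPv4 UDP frame, a raw
         * Ethernet II frame and an IPv6 UDP frame respectively.
         */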
1530         /* IPv4 magic packet pattern - pattern 0. */
1531         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1532         memset(pwol_mask, 0, B44_PMASK_SIZE);
1533         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1534                                   B44_ETHIPV4UDP_HLEN);
1535
1536         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1537         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1538
1539         /* Raw Ethernet II magic packet pattern - pattern 1 */
1540         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1541         memset(pwol_mask, 0, B44_PMASK_SIZE);
1542         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1543                                   ETH_HLEN);
1544
1545         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1546                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1547         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1548                        B44_PMASK_BASE + B44_PMASK_SIZE);
1549
1550         /* IPv6 magic packet pattern - pattern 2 */
1551         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1552         memset(pwol_mask, 0, B44_PMASK_SIZE);
1553         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1554                                   B44_ETHIPV6UDP_HLEN);
1555
1556         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1557                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1558         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1559                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1560
1561         kfree(pwol_pattern);
1562
1563         /* Set these patterns' lengths: one less than each real length. */
1564         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1565         bw32(bp, B44_WKUP_LEN, val);
1566
1567         /* enable wakeup pattern matching */
1568         val = br32(bp, B44_DEVCTRL);
1569         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1570
1571 }
1572
1573 static void b44_setup_wol(struct b44 *bp)
1574 {
1575         u32 val;
1576         u16 pmval;
1577
1578         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1579
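        /* B0 and later cores can match magic packets directly in
         * hardware (DEVCTRL_MPM), so programming the station address
         * is enough; older cores emulate the match with the pseudo
         * magic packet pattern filter instead.
         */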
1580         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1581
1582                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1583
1584                 val = bp->dev->dev_addr[2] << 24 |
1585                         bp->dev->dev_addr[3] << 16 |
1586                         bp->dev->dev_addr[4] << 8 |
1587                         bp->dev->dev_addr[5];
1588                 bw32(bp, B44_ADDR_LO, val);
1589
1590                 val = bp->dev->dev_addr[0] << 8 |
1591                         bp->dev->dev_addr[1];
1592                 bw32(bp, B44_ADDR_HI, val);
1593
1594                 val = br32(bp, B44_DEVCTRL);
1595                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1596
1597         } else {
1598                 b44_setup_pseudo_magicp(bp);
1599         }
1600
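        /* Finally arm wakeup at both the backplane core (SBTMSLOW_PE)
         * and the PCI power-management (PMCSR) level.
         */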
1601         val = br32(bp, B44_SBTMSLOW);
1602         bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1603
1604         pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1605         pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1606
1607 }
1608
1609 static int b44_close(struct net_device *dev)
1610 {
1611         struct b44 *bp = netdev_priv(dev);
1612
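        /* Quiesce in order: stop the TX queue and RX polling, kill the
         * link timer, then halt the chip and free the rings under the
         * lock.
         */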
1613         netif_stop_queue(dev);
1614
1615         netif_poll_disable(dev);
1616
1617         del_timer_sync(&bp->timer);
1618
1619         spin_lock_irq(&bp->lock);
1620
1621 #if 0
1622         b44_dump_state(bp);
1623 #endif
1624         b44_halt(bp);
1625         b44_free_rings(bp);
1626         netif_carrier_off(dev);
1627
1628         spin_unlock_irq(&bp->lock);
1629
1630         free_irq(dev->irq, dev);
1631
1632         netif_poll_enable(dev);
1633
1634         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1635                 b44_init_hw(bp, B44_PARTIAL_RESET);
1636                 b44_setup_wol(bp);
1637         }
1638
1639         b44_free_consistent(bp);
1640
1641         return 0;
1642 }
1643
1644 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1645 {
1646         struct b44 *bp = netdev_priv(dev);
1647         struct net_device_stats *nstat = &bp->stats;
1648         struct b44_hw_stats *hwstat = &bp->hw_stats;
1649
1650         /* Convert HW stats into netdevice stats. */
1651         nstat->rx_packets = hwstat->rx_pkts;
1652         nstat->tx_packets = hwstat->tx_pkts;
1653         nstat->rx_bytes   = hwstat->rx_octets;
1654         nstat->tx_bytes   = hwstat->tx_octets;
1655         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1656                              hwstat->tx_oversize_pkts +
1657                              hwstat->tx_underruns +
1658                              hwstat->tx_excessive_cols +
1659                              hwstat->tx_late_cols);
1660         nstat->multicast  = hwstat->tx_multicast_pkts;
1661         nstat->collisions = hwstat->tx_total_cols;
1662
1663         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1664                                    hwstat->rx_undersize);
1665         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1666         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1667         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1668         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1669                                    hwstat->rx_oversize_pkts +
1670                                    hwstat->rx_missed_pkts +
1671                                    hwstat->rx_crc_align_errs +
1672                                    hwstat->rx_undersize +
1673                                    hwstat->rx_crc_errs +
1674                                    hwstat->rx_align_errs +
1675                                    hwstat->rx_symbol_errs);
1676
1677         nstat->tx_aborted_errors = hwstat->tx_underruns;
1678 #if 0
1679         /* Carrier lost counter seems to be broken for some devices */
1680         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1681 #endif
1682
1683         return nstat;
1684 }
1685
1686 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1687 {
1688         struct dev_mc_list *mclist;
1689         int i, num_ents;
1690
1691         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1692         mclist = dev->mc_list;
1693         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1694                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1695         }
1696         return i + 1;
1697 }
1698
1699 static void __b44_set_rx_mode(struct net_device *dev)
1700 {
1701         struct b44 *bp = netdev_priv(dev);
1702         u32 val;
1703
1704         val = br32(bp, B44_RXCONFIG);
1705         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1706         if (dev->flags & IFF_PROMISC) {
1707                 val |= RXCONFIG_PROMISC;
1708                 bw32(bp, B44_RXCONFIG, val);
1709         } else {
1710                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1711                 int i = 1;
1712
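                /* CAM entry 0 holds the station address (written by
                 * __b44_set_mac_addr); multicast entries start at
                 * index 1 and every remaining entry up to 63 is
                 * cleared below.
                 */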
1713                 __b44_set_mac_addr(bp);
1714
1715                 if ((dev->flags & IFF_ALLMULTI) ||
1716                     (dev->mc_count > B44_MCAST_TABLE_SIZE))
1717                         val |= RXCONFIG_ALLMULTI;
1718                 else
1719                         i = __b44_load_mcast(bp, dev);
1720
1721                 for (; i < 64; i++)
1722                         __b44_cam_write(bp, zero, i);
1723
1724                 bw32(bp, B44_RXCONFIG, val);
1725                 val = br32(bp, B44_CAM_CTRL);
1726                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1727         }
1728 }
1729
1730 static void b44_set_rx_mode(struct net_device *dev)
1731 {
1732         struct b44 *bp = netdev_priv(dev);
1733
1734         spin_lock_irq(&bp->lock);
1735         __b44_set_rx_mode(dev);
1736         spin_unlock_irq(&bp->lock);
1737 }
1738
1739 static u32 b44_get_msglevel(struct net_device *dev)
1740 {
1741         struct b44 *bp = netdev_priv(dev);
1742         return bp->msg_enable;
1743 }
1744
1745 static void b44_set_msglevel(struct net_device *dev, u32 value)
1746 {
1747         struct b44 *bp = netdev_priv(dev);
1748         bp->msg_enable = value;
1749 }
1750
1751 static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1752 {
1753         struct b44 *bp = netdev_priv(dev);
1754         struct pci_dev *pci_dev = bp->pdev;
1755
1756         strcpy (info->driver, DRV_MODULE_NAME);
1757         strcpy (info->version, DRV_MODULE_VERSION);
1758         strcpy (info->bus_info, pci_name(pci_dev));
1759 }
1760
1761 static int b44_nway_reset(struct net_device *dev)
1762 {
1763         struct b44 *bp = netdev_priv(dev);
1764         u32 bmcr;
1765         int r;
1766
1767         spin_lock_irq(&bp->lock);
1768         b44_readphy(bp, MII_BMCR, &bmcr);
1769         b44_readphy(bp, MII_BMCR, &bmcr);
1770         r = -EINVAL;
1771         if (bmcr & BMCR_ANENABLE) {
1772                 b44_writephy(bp, MII_BMCR,
1773                              bmcr | BMCR_ANRESTART);
1774                 r = 0;
1775         }
1776         spin_unlock_irq(&bp->lock);
1777
1778         return r;
1779 }
1780
1781 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1782 {
1783         struct b44 *bp = netdev_priv(dev);
1784
1785         cmd->supported = (SUPPORTED_Autoneg);
1786         cmd->supported |= (SUPPORTED_100baseT_Half |
1787                           SUPPORTED_100baseT_Full |
1788                           SUPPORTED_10baseT_Half |
1789                           SUPPORTED_10baseT_Full |
1790                           SUPPORTED_MII);
1791
1792         cmd->advertising = 0;
1793         if (bp->flags & B44_FLAG_ADV_10HALF)
1794                 cmd->advertising |= ADVERTISED_10baseT_Half;
1795         if (bp->flags & B44_FLAG_ADV_10FULL)
1796                 cmd->advertising |= ADVERTISED_10baseT_Full;
1797         if (bp->flags & B44_FLAG_ADV_100HALF)
1798                 cmd->advertising |= ADVERTISED_100baseT_Half;
1799         if (bp->flags & B44_FLAG_ADV_100FULL)
1800                 cmd->advertising |= ADVERTISED_100baseT_Full;
1801         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1802         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1803                 SPEED_100 : SPEED_10;
1804         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1805                 DUPLEX_FULL : DUPLEX_HALF;
1806         cmd->port = 0;
1807         cmd->phy_address = bp->phy_addr;
1808         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1809                 XCVR_INTERNAL : XCVR_EXTERNAL;
1810         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1811                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1812         if (cmd->autoneg == AUTONEG_ENABLE)
1813                 cmd->advertising |= ADVERTISED_Autoneg;
1814         if (!netif_running(dev)) {
1815                 cmd->speed = 0;
1816                 cmd->duplex = 0xff;
1817         }
1818         cmd->maxtxpkt = 0;
1819         cmd->maxrxpkt = 0;
1820         return 0;
1821 }
1822
1823 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1824 {
1825         struct b44 *bp = netdev_priv(dev);
1826
1827         /* We do not support gigabit. */
1828         if (cmd->autoneg == AUTONEG_ENABLE) {
1829                 if (cmd->advertising &
1830                     (ADVERTISED_1000baseT_Half |
1831                      ADVERTISED_1000baseT_Full))
1832                         return -EINVAL;
1833         } else if ((cmd->speed != SPEED_100 &&
1834                     cmd->speed != SPEED_10) ||
1835                    (cmd->duplex != DUPLEX_HALF &&
1836                     cmd->duplex != DUPLEX_FULL)) {
1837                 return -EINVAL;
1838         }
1839
1840         spin_lock_irq(&bp->lock);
1841
1842         if (cmd->autoneg == AUTONEG_ENABLE) {
1843                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1844                                B44_FLAG_100_BASE_T |
1845                                B44_FLAG_FULL_DUPLEX |
1846                                B44_FLAG_ADV_10HALF |
1847                                B44_FLAG_ADV_10FULL |
1848                                B44_FLAG_ADV_100HALF |
1849                                B44_FLAG_ADV_100FULL);
1850                 if (cmd->advertising == 0) {
1851                         bp->flags |= (B44_FLAG_ADV_10HALF |
1852                                       B44_FLAG_ADV_10FULL |
1853                                       B44_FLAG_ADV_100HALF |
1854                                       B44_FLAG_ADV_100FULL);
1855                 } else {
1856                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1857                                 bp->flags |= B44_FLAG_ADV_10HALF;
1858                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1859                                 bp->flags |= B44_FLAG_ADV_10FULL;
1860                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1861                                 bp->flags |= B44_FLAG_ADV_100HALF;
1862                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1863                                 bp->flags |= B44_FLAG_ADV_100FULL;
1864                 }
1865         } else {
1866                 bp->flags |= B44_FLAG_FORCE_LINK;
1867                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1868                 if (cmd->speed == SPEED_100)
1869                         bp->flags |= B44_FLAG_100_BASE_T;
1870                 if (cmd->duplex == DUPLEX_FULL)
1871                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1872         }
1873
1874         if (netif_running(dev))
1875                 b44_setup_phy(bp);
1876
1877         spin_unlock_irq(&bp->lock);
1878
1879         return 0;
1880 }
1881
1882 static void b44_get_ringparam(struct net_device *dev,
1883                               struct ethtool_ringparam *ering)
1884 {
1885         struct b44 *bp = netdev_priv(dev);
1886
1887         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1888         ering->rx_pending = bp->rx_pending;
1889
1890         /* ethtool_ringparam does include TX fields; report them too. */
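        ering->tx_max_pending = B44_TX_RING_SIZE - 1;
        ering->tx_pending = bp->tx_pending;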
1891 }
1892
1893 static int b44_set_ringparam(struct net_device *dev,
1894                              struct ethtool_ringparam *ering)
1895 {
1896         struct b44 *bp = netdev_priv(dev);
1897
1898         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1899             (ering->rx_mini_pending != 0) ||
1900             (ering->rx_jumbo_pending != 0) ||
1901             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1902                 return -EINVAL;
1903
1904         spin_lock_irq(&bp->lock);
1905
1906         bp->rx_pending = ering->rx_pending;
1907         bp->tx_pending = ering->tx_pending;
1908
1909         b44_halt(bp);
1910         b44_init_rings(bp);
1911         b44_init_hw(bp, B44_FULL_RESET);
1912         netif_wake_queue(bp->dev);
1913         spin_unlock_irq(&bp->lock);
1914
1915         b44_enable_ints(bp);
1916
1917         return 0;
1918 }
1919
1920 static void b44_get_pauseparam(struct net_device *dev,
1921                                 struct ethtool_pauseparam *epause)
1922 {
1923         struct b44 *bp = netdev_priv(dev);
1924
1925         epause->autoneg =
1926                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1927         epause->rx_pause =
1928                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1929         epause->tx_pause =
1930                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1931 }
1932
1933 static int b44_set_pauseparam(struct net_device *dev,
1934                                 struct ethtool_pauseparam *epause)
1935 {
1936         struct b44 *bp = netdev_priv(dev);
1937
1938         spin_lock_irq(&bp->lock);
1939         if (epause->autoneg)
1940                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1941         else
1942                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1943         if (epause->rx_pause)
1944                 bp->flags |= B44_FLAG_RX_PAUSE;
1945         else
1946                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1947         if (epause->tx_pause)
1948                 bp->flags |= B44_FLAG_TX_PAUSE;
1949         else
1950                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1951         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1952                 b44_halt(bp);
1953                 b44_init_rings(bp);
1954                 b44_init_hw(bp, B44_FULL_RESET);
1955         } else {
1956                 __b44_set_flow_ctrl(bp, bp->flags);
1957         }
1958         spin_unlock_irq(&bp->lock);
1959
1960         b44_enable_ints(bp);
1961
1962         return 0;
1963 }
1964
1965 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1966 {
1967         switch (stringset) {
1968         case ETH_SS_STATS:
1969                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1970                 break;
1971         }
1972 }
1973
1974 static int b44_get_stats_count(struct net_device *dev)
1975 {
1976         return ARRAY_SIZE(b44_gstrings);
1977 }
1978
1979 static void b44_get_ethtool_stats(struct net_device *dev,
1980                                   struct ethtool_stats *stats, u64 *data)
1981 {
1982         struct b44 *bp = netdev_priv(dev);
1983         u32 *val = &bp->hw_stats.tx_good_octets;
1984         u32 i;
1985
1986         spin_lock_irq(&bp->lock);
1987
1988         b44_stats_update(bp);
1989
1990         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1991                 *data++ = *val++;
1992
1993         spin_unlock_irq(&bp->lock);
1994 }
1995
1996 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1997 {
1998         struct b44 *bp = netdev_priv(dev);
1999
2000         wol->supported = WAKE_MAGIC;
2001         if (bp->flags & B44_FLAG_WOL_ENABLE)
2002                 wol->wolopts = WAKE_MAGIC;
2003         else
2004                 wol->wolopts = 0;
2005         memset(&wol->sopass, 0, sizeof(wol->sopass));
2006 }
2007
2008 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2009 {
2010         struct b44 *bp = netdev_priv(dev);
2011
2012         spin_lock_irq(&bp->lock);
2013         if (wol->wolopts & WAKE_MAGIC)
2014                 bp->flags |= B44_FLAG_WOL_ENABLE;
2015         else
2016                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2017         spin_unlock_irq(&bp->lock);
2018
2019         return 0;
2020 }
2021
2022 static const struct ethtool_ops b44_ethtool_ops = {
2023         .get_drvinfo            = b44_get_drvinfo,
2024         .get_settings           = b44_get_settings,
2025         .set_settings           = b44_set_settings,
2026         .nway_reset             = b44_nway_reset,
2027         .get_link               = ethtool_op_get_link,
2028         .get_wol                = b44_get_wol,
2029         .set_wol                = b44_set_wol,
2030         .get_ringparam          = b44_get_ringparam,
2031         .set_ringparam          = b44_set_ringparam,
2032         .get_pauseparam         = b44_get_pauseparam,
2033         .set_pauseparam         = b44_set_pauseparam,
2034         .get_msglevel           = b44_get_msglevel,
2035         .set_msglevel           = b44_set_msglevel,
2036         .get_strings            = b44_get_strings,
2037         .get_stats_count        = b44_get_stats_count,
2038         .get_ethtool_stats      = b44_get_ethtool_stats,
2039         .get_perm_addr          = ethtool_op_get_perm_addr,
2040 };
2041
2042 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2043 {
2044         struct mii_ioctl_data *data = if_mii(ifr);
2045         struct b44 *bp = netdev_priv(dev);
2046         int err = -EINVAL;
2047
2048         if (!netif_running(dev))
2049                 goto out;
2050
2051         spin_lock_irq(&bp->lock);
2052         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2053         spin_unlock_irq(&bp->lock);
2054 out:
2055         return err;
2056 }
2057
2058 /* Read the 128-byte EEPROM, which is shadowed at offset 4096 of the register space. */
2059 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2060 {
2061         long i;
2062         __le16 *ptr = (__le16 *) data;
2063
2064         for (i = 0; i < 128; i += 2)
2065                 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2066
2067         return 0;
2068 }
2069
2070 static int __devinit b44_get_invariants(struct b44 *bp)
2071 {
2072         u8 eeprom[128];
2073         int err;
2074
2075         err = b44_read_eeprom(bp, &eeprom[0]);
2076         if (err)
2077                 goto out;
2078
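        /* Each 16-bit EEPROM word stores its two address bytes in
         * swapped order, hence the pairwise-reversed indices below.
         */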
2079         bp->dev->dev_addr[0] = eeprom[79];
2080         bp->dev->dev_addr[1] = eeprom[78];
2081         bp->dev->dev_addr[2] = eeprom[81];
2082         bp->dev->dev_addr[3] = eeprom[80];
2083         bp->dev->dev_addr[4] = eeprom[83];
2084         bp->dev->dev_addr[5] = eeprom[82];
2085
2086         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2087                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2088                 return -EINVAL;
2089         }
2090
2091         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2092
2093         bp->phy_addr = eeprom[90] & 0x1f;
2094
2095         /* With this, plus the rx_header prepended to the data by the
2096          * hardware, we'll land the ethernet header on a 2-byte boundary.
2097          */
2098         bp->rx_offset = 30;
2099
2100         bp->imask = IMASK_DEF;
2101
2102         bp->core_unit = ssb_core_unit(bp);
2103         bp->dma_offset = SB_PCI_DMA;
2104
2105         /* XXX - really required?
2106            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2107          */
2108
2109         if (ssb_get_core_rev(bp) >= 7)
2110                 bp->flags |= B44_FLAG_B0_ANDLATER;
2111
2112 out:
2113         return err;
2114 }
2115
2116 static int __devinit b44_init_one(struct pci_dev *pdev,
2117                                   const struct pci_device_id *ent)
2118 {
2119         static int b44_version_printed;
2120         unsigned long b44reg_base, b44reg_len;
2121         struct net_device *dev;
2122         struct b44 *bp;
2123         int err, i;
2124
2125         if (b44_version_printed++ == 0)
2126                 printk(KERN_INFO "%s", version);
2127
2128         err = pci_enable_device(pdev);
2129         if (err) {
2130                 dev_err(&pdev->dev, "Cannot enable PCI device, "
2131                        "aborting.\n");
2132                 return err;
2133         }
2134
2135         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2136                 dev_err(&pdev->dev,
2137                         "Cannot find proper PCI device "
2138                        "base address, aborting.\n");
2139                 err = -ENODEV;
2140                 goto err_out_disable_pdev;
2141         }
2142
2143         err = pci_request_regions(pdev, DRV_MODULE_NAME);
2144         if (err) {
2145                 dev_err(&pdev->dev,
2146                         "Cannot obtain PCI resources, aborting.\n");
2147                 goto err_out_disable_pdev;
2148         }
2149
2150         pci_set_master(pdev);
2151
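        /* The 4400 can only DMA within the low 1GB of the address
         * space, hence the unusual 30-bit mask.
         */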
2152         err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2153         if (err) {
2154                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2155                 goto err_out_free_res;
2156         }
2157
2158         err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2159         if (err) {
2160                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2161                 goto err_out_free_res;
2162         }
2163
2164         b44reg_base = pci_resource_start(pdev, 0);
2165         b44reg_len = pci_resource_len(pdev, 0);
2166
2167         dev = alloc_etherdev(sizeof(*bp));
2168         if (!dev) {
2169                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2170                 err = -ENOMEM;
2171                 goto err_out_free_res;
2172         }
2173
2174         SET_MODULE_OWNER(dev);
2175         SET_NETDEV_DEV(dev,&pdev->dev);
2176
2177         /* No interesting netdevice features in this card... */
2178         dev->features |= 0;
2179
2180         bp = netdev_priv(dev);
2181         bp->pdev = pdev;
2182         bp->dev = dev;
2183
2184         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2185
2186         spin_lock_init(&bp->lock);
2187
2188         bp->regs = ioremap(b44reg_base, b44reg_len);
2189         if (bp->regs == 0UL) {
2190                 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2191                 err = -ENOMEM;
2192                 goto err_out_free_dev;
2193         }
2194
2195         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2196         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2197
2198         dev->open = b44_open;
2199         dev->stop = b44_close;
2200         dev->hard_start_xmit = b44_start_xmit;
2201         dev->get_stats = b44_get_stats;
2202         dev->set_multicast_list = b44_set_rx_mode;
2203         dev->set_mac_address = b44_set_mac_addr;
2204         dev->do_ioctl = b44_ioctl;
2205         dev->tx_timeout = b44_tx_timeout;
2206         dev->poll = b44_poll;
2207         dev->weight = 64;
2208         dev->watchdog_timeo = B44_TX_TIMEOUT;
2209 #ifdef CONFIG_NET_POLL_CONTROLLER
2210         dev->poll_controller = b44_poll_controller;
2211 #endif
2212         dev->change_mtu = b44_change_mtu;
2213         dev->irq = pdev->irq;
2214         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2215
2216         netif_carrier_off(dev);
2217
2218         err = b44_get_invariants(bp);
2219         if (err) {
2220                 dev_err(&pdev->dev,
2221                         "Problem fetching invariants of chip, aborting.\n");
2222                 goto err_out_iounmap;
2223         }
2224
2225         bp->mii_if.dev = dev;
2226         bp->mii_if.mdio_read = b44_mii_read;
2227         bp->mii_if.mdio_write = b44_mii_write;
2228         bp->mii_if.phy_id = bp->phy_addr;
2229         bp->mii_if.phy_id_mask = 0x1f;
2230         bp->mii_if.reg_num_mask = 0x1f;
2231
2232         /* By default, advertise all speed/duplex settings. */
2233         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2234                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2235
2236         /* By default, auto-negotiate PAUSE. */
2237         bp->flags |= B44_FLAG_PAUSE_AUTO;
2238
2239         err = register_netdev(dev);
2240         if (err) {
2241                 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2242                 goto err_out_iounmap;
2243         }
2244
2245         pci_set_drvdata(pdev, dev);
2246
2247         pci_save_state(bp->pdev);
2248
2249         /* Chip reset provides power to the b44 MAC & PCI cores, which
2250          * is necessary for MAC register access.
2251          */
2252         b44_chip_reset(bp);
2253
2254         printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2255         for (i = 0; i < 6; i++)
2256                 printk("%2.2x%c", dev->dev_addr[i],
2257                        i == 5 ? '\n' : ':');
2258
2259         return 0;
2260
2261 err_out_iounmap:
2262         iounmap(bp->regs);
2263
2264 err_out_free_dev:
2265         free_netdev(dev);
2266
2267 err_out_free_res:
2268         pci_release_regions(pdev);
2269
2270 err_out_disable_pdev:
2271         pci_disable_device(pdev);
2272         pci_set_drvdata(pdev, NULL);
2273         return err;
2274 }
2275
2276 static void __devexit b44_remove_one(struct pci_dev *pdev)
2277 {
2278         struct net_device *dev = pci_get_drvdata(pdev);
2279         struct b44 *bp = netdev_priv(dev);
2280
2281         unregister_netdev(dev);
2282         iounmap(bp->regs);
2283         free_netdev(dev);
2284         pci_release_regions(pdev);
2285         pci_disable_device(pdev);
2286         pci_set_drvdata(pdev, NULL);
2287 }
2288
2289 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2290 {
2291         struct net_device *dev = pci_get_drvdata(pdev);
2292         struct b44 *bp = netdev_priv(dev);
2293
2294         if (!netif_running(dev))
2295                  return 0;
2296
2297         del_timer_sync(&bp->timer);
2298
2299         spin_lock_irq(&bp->lock);
2300
2301         b44_halt(bp);
2302         netif_carrier_off(bp->dev);
2303         netif_device_detach(bp->dev);
2304         b44_free_rings(bp);
2305
2306         spin_unlock_irq(&bp->lock);
2307
2308         free_irq(dev->irq, dev);
2309         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2310                 b44_init_hw(bp, B44_PARTIAL_RESET);
2311                 b44_setup_wol(bp);
2312         }
2313         pci_disable_device(pdev);
2314         return 0;
2315 }
2316
2317 static int b44_resume(struct pci_dev *pdev)
2318 {
2319         struct net_device *dev = pci_get_drvdata(pdev);
2320         struct b44 *bp = netdev_priv(dev);
2321         int rc = 0;
2322
2323         pci_restore_state(pdev);
2324         rc = pci_enable_device(pdev);
2325         if (rc) {
2326                 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2327                         dev->name);
2328                 return rc;
2329         }
2330
2331         pci_set_master(pdev);
2332
2333         if (!netif_running(dev))
2334                 return 0;
2335
2336         rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2337         if (rc) {
2338                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2339                 pci_disable_device(pdev);
2340                 return rc;
2341         }
2342
2343         spin_lock_irq(&bp->lock);
2344
2345         b44_init_rings(bp);
2346         b44_init_hw(bp, B44_FULL_RESET);
2347         netif_device_attach(bp->dev);
2348         spin_unlock_irq(&bp->lock);
2349
2350         bp->timer.expires = jiffies + HZ;
2351         add_timer(&bp->timer);
2352
2353         b44_enable_ints(bp);
2354         netif_wake_queue(dev);
2355         return 0;
2356 }
2357
2358 static struct pci_driver b44_driver = {
2359         .name           = DRV_MODULE_NAME,
2360         .id_table       = b44_pci_tbl,
2361         .probe          = b44_init_one,
2362         .remove         = __devexit_p(b44_remove_one),
2363         .suspend        = b44_suspend,
2364         .resume         = b44_resume,
2365 };
2366
2367 static int __init b44_init(void)
2368 {
2369         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2370
2371         /* Set up parameters for syncing RX/TX DMA descriptors. */
2372         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2373         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2374
2375         return pci_register_driver(&b44_driver);
2376 }
2377
2378 static void __exit b44_cleanup(void)
2379 {
2380         pci_unregister_driver(&b44_driver);
2381 }
2382
2383 module_init(b44_init);
2384 module_exit(b44_cleanup);
2385