b44: Ratelimit timeout error message.
drivers/net/b44.c
/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>


#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "2.0"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

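/*
 * Illustrative sketch, not part of the driver: how the ring macros
 * above behave.  With B44_TX_RING_SIZE = 512 and the default
 * tx_pending of 511, TX_RING_GAP() is 1, so one descriptor always
 * stays unused to tell a full ring apart from an empty one.  For a
 * hypothetical bp with tx_pending = 511:
 *
 *   tx_cons = 10, tx_prod = 10  ->  TX_BUFFS_AVAIL() = 511  (empty)
 *   tx_cons = 10, tx_prod = 9   ->  10 - 9 - 1        = 0   (full)
 *
 * NEXT_TX() advances an index modulo the power-of-two ring size,
 * so NEXT_TX(511) == 0.
 */
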
#define RX_PKT_OFFSET           (RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ           (1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");


#ifdef CONFIG_B44_PCI
static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
        { 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
        SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3
#define B44_CHIP_RESET_FULL     4
#define B44_CHIP_RESET_PARTIAL  5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        ssb_dma_sync_single_range_for_device(sdev, dma_base,
                                             offset & dma_desc_align_mask,
                                             dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
                                          offset & dma_desc_align_mask,
                                          dma_desc_sync_size, dir);
}

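/*
 * Illustrative sketch, not part of the driver: the helpers above sync
 * a whole alignment-sized window rather than a single descriptor.
 * Assuming dma_desc_align_mask is set up at init time as ~(align - 1),
 * 'offset & dma_desc_align_mask' rounds the descriptor offset down to
 * the alignment boundary and dma_desc_sync_size covers the rest of the
 * window.  For a hypothetical 32-byte alignment:
 *
 *   offset = 72  ->  72 & ~31 = 64, sync bytes 64..95
 */
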
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                if (net_ratelimit())
                        printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit "
                               "%08x of register "
                               "%lx to %s.\n",
                               bp->dev->name,
                               bit, reg,
                               (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}

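/*
 * Illustrative sketch, not part of the driver: b44_wait_bit() polls in
 * 10 usec steps, so its 'timeout' argument is in units of 10 usec.
 * The CAM accessors below pass 100, i.e. the hardware gets roughly
 * 1 msec to clear CAM_CTRL_BUSY before the ratelimited error message
 * above fires and -ENODEV is returned.
 */
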
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
                            (index << CAM_CTRL_INDEX_SHIFT)));

        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

        val = br32(bp, B44_CAM_DATA_LO);

        data[2] = (val >> 24) & 0xFF;
        data[3] = (val >> 16) & 0xFF;
        data[4] = (val >> 8) & 0xFF;
        data[5] = (val >> 0) & 0xFF;

        val = br32(bp, B44_CAM_DATA_HI);

        data[0] = (val >> 8) & 0xFF;
        data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

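#if 0   /* Illustrative sketch, not part of the driver: the register
         * packing used by __b44_cam_write() above, as a hypothetical
         * standalone helper.  Bytes 2..5 of the MAC go into CAM_DATA_LO
         * most significant first, bytes 0..1 into CAM_DATA_HI together
         * with the valid bit; __b44_cam_read() reverses this exactly.
         */
static void b44_cam_pack_example(const unsigned char *mac,
                                 u32 *data_lo, u32 *data_hi)
{
        *data_lo = ((u32) mac[2] << 24) | ((u32) mac[3] << 16) |
                   ((u32) mac[4] <<  8) | ((u32) mac[5] <<  0);
        *data_hi = CAM_DATA_HI_VALID |
                   (((u32) mac[0]) << 8) | ((u32) mac[1]);
        /* For 00:11:22:33:44:55 this yields *data_lo == 0x22334455. */
}
#endif
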
static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;

        return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;

        return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = __b44_readphy(bp, phy_id, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;
        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)){
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

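/*
 * Illustrative sketch, not part of the driver: the pause resolution
 * implemented by b44_set_flow_ctrl() above.  RX pause is enabled only
 * in the asymmetric case, where we advertise both pause bits and the
 * link partner advertises ASYM but not CAP; everything else leaves
 * pause off (use ethtool to override):
 *
 *   local CAP+ASYM  remote CAP  remote ASYM  ->  RX pause
 *        yes            0            1            on
 *        yes            1            x            off
 *        no             x            x            off
 */
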
#ifdef SSB_DRIVER_MIPS
extern char *nvram_get(char *name);
static void b44_wap54g10_workaround(struct b44 *bp)
{
        const char *str;
        u32 val;
        int err;

        /*
         * workaround for bad hardware design in Linksys WAP54G v1.0
         * see https://dev.openwrt.org/ticket/146
         * check and reset bit "isolate"
         */
        str = nvram_get("boardnum");
        if (!str)
                return;
        if (simple_strtoul(str, NULL, 0) == 2) {
                err = __b44_readphy(bp, 0, MII_BMCR, &val);
                if (err)
                        goto error;
                if (!(val & BMCR_ISOLATE))
                        return;
                val &= ~BMCR_ISOLATE;
                err = __b44_writephy(bp, 0, MII_BMCR, val);
                if (err)
                        goto error;
        }
        return;
error:
        printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        b44_wap54g10_workaround(bp);

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;
        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
                bp->flags |= B44_FLAG_100_BASE_T;
                bp->flags |= B44_FLAG_FULL_DUPLEX;
                if (!netif_carrier_ok(bp->dev)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        val |= TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                }
                return;
        }

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                ssb_dma_unmap_single(bp->sdev,
                                     rp->mapping,
                                     skb->len,
                                     DMA_TO_DEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = ssb_dma_map_single(bp->sdev, skb->data,
                                     RX_PKT_BUF_SZ,
                                     DMA_FROM_DEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (ssb_dma_mapping_error(bp->sdev, mapping) ||
                mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
                /* Sigh... */
                if (!ssb_dma_mapping_error(bp->sdev, mapping))
                        ssb_dma_unmap_single(bp->sdev, mapping,
                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = ssb_dma_map_single(bp->sdev, skb->data,
                                             RX_PKT_BUF_SZ,
                                             DMA_FROM_DEVICE);
                if (ssb_dma_mapping_error(bp->sdev, mapping) ||
                        mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
                        if (!ssb_dma_mapping_error(bp->sdev, mapping))
                                ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
                bp->force_copybreak = 1;
        }

        rh = (struct rx_header *) skb->data;

        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        map->mapping = mapping;

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                            dest_idx * sizeof(*dp),
                                            DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

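/*
 * Illustrative sketch, not part of the driver: the layout of one RX
 * buffer as set up by b44_alloc_rx_skb() above.  The descriptor points
 * at the start of the buffer, and the chip is told (via the receive
 * offset programmed into DMARX_CTRL in b44_init_hw()) to place packet
 * data RX_PKT_OFFSET bytes in:
 *
 *   skb->data                     skb->data + RX_PKT_OFFSET
 *      |                             |
 *      v                             v
 *      +------------------+-----+--------------------------+
 *      | struct rx_header | pad | packet data (up to 1536) |
 *      |  (RX_HEADER_LEN) | (2) |                          |
 *      +------------------+-----+--------------------------+
 */
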
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        __le32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        dest_map->mapping = src_map->mapping;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
                                         src_idx * sizeof(*src_desc),
                                         DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
                                       RX_PKT_BUF_SZ,
                                       DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = rp->mapping;
                struct rx_header *rh;
                u16 len;

                ssb_dma_sync_single_for_cpu(bp->sdev, map,
                                            RX_PKT_BUF_SZ,
                                            DMA_FROM_DEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->dev->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        ssb_dma_unmap_single(bp->sdev, map,
                                             skb_size, DMA_FROM_DEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + RX_PKT_OFFSET);
                        skb_pull(skb, RX_PKT_OFFSET);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
                                                         copy_skb->data, len);
                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

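#if 0   /* Illustrative sketch, not part of the driver: the copybreak
         * decision taken in b44_rx() above, written out as a
         * hypothetical predicate.  Short frames are copied into a small
         * fresh skb so the big RX_PKT_BUF_SZ buffer can be recycled in
         * place; long frames hand the original buffer up the stack and
         * a new buffer is allocated for the ring slot.
         */
static bool b44_rx_should_copy_example(const struct b44 *bp, u16 len)
{
        return bp->force_copybreak || len <= RX_COPY_THRESHOLD;
}
#endif
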
static int b44_poll(struct napi_struct *napi, int budget)
{
        struct b44 *bp = container_of(napi, struct b44, napi);
        int work_done;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irqrestore(&bp->lock, flags);

        work_done = 0;
        if (bp->istat & ISTAT_RX)
                work_done += b44_rx(bp, budget);

        if (bp->istat & ISTAT_ERRORS) {
                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                work_done = 0;
        }

        if (work_done < budget) {
                napi_complete(napi);
                b44_enable_ints(bp);
        }

        return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (napi_schedule_prep(&bp->napi)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __napi_schedule(&bp->napi);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;
        unsigned long flags;

        len = skb->len;
        spin_lock_irqsave(&bp->lock, flags);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
        if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                struct sk_buff *bounce_skb;

                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!ssb_dma_mapping_error(bp->sdev, mapping))
                        ssb_dma_unmap_single(bp->sdev, mapping, len,
                                             DMA_TO_DEVICE);

                bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
                                             len, DMA_TO_DEVICE);
                if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                        if (!ssb_dma_mapping_error(bp->sdev, mapping))
                                ssb_dma_unmap_single(bp->sdev, mapping,
                                                     len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        bp->tx_buffers[entry].mapping = mapping;

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
                                            entry * sizeof(bp->tx_ring[0]),
                                            DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irqrestore(&bp->lock, flags);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

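#if 0   /* Illustrative sketch, not part of the driver: the DMA
         * reachability test used by b44_start_xmit() above and by
         * b44_alloc_rx_skb(), written out as a hypothetical predicate.
         * The core can only DMA below 1GB, so a mapping is usable only
         * if the end of the buffer still fits under DMA_BIT_MASK(30)
         * (0x3fffffff); otherwise a GFP_DMA bounce buffer is used.
         */
static bool b44_mapping_ok_example(struct b44 *bp, dma_addr_t mapping,
                                   size_t len)
{
        return !ssb_dma_mapping_error(bp->sdev, mapping) &&
               mapping + len <= DMA_BIT_MASK(30);
}
#endif
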
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
                                     DMA_FROM_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
                                     DMA_TO_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
                                               DMA_TABLE_BYTES,
                                               DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
                                               DMA_TABLE_BYTES,
                                               DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
                                                bp->rx_ring, bp->rx_ring_dma,
                                                GFP_KERNEL);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
                                                bp->tx_ring, bp->tx_ring_dma,
                                                GFP_KERNEL);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, gfp);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, gfp);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to ssb_dma_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, gfp);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
                                                 DMA_TABLE_BYTES,
                                                 DMA_BIDIRECTIONAL);

                if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
                        rx_ring_dma + size > DMA_BIT_MASK(30)) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to ssb_dma_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, gfp);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
                                            DMA_TABLE_BYTES,
                                            DMA_TO_DEVICE);

                if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
                        tx_ring_dma + size > DMA_BIT_MASK(30)) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
        struct ssb_device *sdev = bp->sdev;
        bool was_enabled;

        was_enabled = ssb_device_is_enabled(bp->sdev);

        ssb_device_enable(bp->sdev, 0);
        ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

        if (was_enabled) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        }

        b44_clear_stats(bp);

        /*
         * Don't enable PHY if we are doing a partial reset
         * we are probably going to power down
         */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

        switch (sdev->bus->bustype) {
        case SSB_BUSTYPE_SSB:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
                                        B44_MDC_RATIO)
                     & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCI:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (0x0d & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCMCIA:
        case SSB_BUSTYPE_SDIO:
                WARN_ON(1); /* A device with this bus does not exist. */
                break;
        }

        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

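/*
 * Illustrative sketch, not part of the driver: the MDIO clock divider
 * programmed by b44_chip_reset() above.  On a native SSB bus the MII
 * management clock is derived from the backplane clock,
 *
 *   divider = DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus), B44_MDC_RATIO)
 *
 * clamped into MDIO_CTRL_MAXF_MASK, while on PCI a fixed divider of
 * 0x0d is used.
 */
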
/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        /* reset PHY */
        b44_phy_reset(bp);
        /* power down PHY */
        printk(KERN_INFO PFX "%s: powering down PHY\n", bp->dev->name);
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
        /* now reset the chip, but without enabling the MAC&PHY
         * part of it. This has to be done _after_ we shut down the PHY */
        b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;
        u32 val;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);

        val = br32(bp, B44_RXCONFIG);
        if (!(val & RXCONFIG_CAM_ABSENT))
                __b44_set_mac_addr(bp);

        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp, GFP_KERNEL);
        if (err)
                goto out;

        napi_enable(&bp->napi);

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                napi_disable(&bp->napi);
                b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
        int magicsync = 6;
        int k, j, len = offset;
        int ethaddr_bytes = ETH_ALEN;

        memset(ppattern + offset, 0xff, magicsync);
        for (j = 0; j < magicsync; j++)
                set_bit(len++, (unsigned long *) pmask);

        for (j = 0; j < B44_MAX_PATTERNS; j++) {
                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
                        ethaddr_bytes = ETH_ALEN;
                else
                        ethaddr_bytes = B44_PATTERN_SIZE - len;
                if (ethaddr_bytes <= 0)
                        break;
                for (k = 0; k < ethaddr_bytes; k++) {
                        ppattern[offset + magicsync +
                                (j * ETH_ALEN) + k] = macaddr[k];
                        set_bit(len++, (unsigned long *) pmask);
                }
        }
        return len - 1;
}

1516 /* Set up the magic-packet patterns in the b44 WOL
1517  * pattern-matching filter.
1518  */
1519 static void b44_setup_pseudo_magicp(struct b44 *bp)
1520 {
1522         u32 val;
1523         int plen0, plen1, plen2;
1524         u8 *pwol_pattern;
1525         u8 pwol_mask[B44_PMASK_SIZE];
1526
1527         pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1528         if (!pwol_pattern) {
1529                 printk(KERN_ERR PFX "Memory not available for WOL\n");
1530                 return;
1531         }
1532
1533         /* IPv4 magic packet pattern - pattern 0. */
1534         memset(pwol_mask, 0, B44_PMASK_SIZE);
1535         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1536                                   B44_ETHIPV4UDP_HLEN);
1537
1538         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1539         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1540
1541         /* Raw Ethernet II magic packet pattern - pattern 1 */
1542         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1543         memset(pwol_mask, 0, B44_PMASK_SIZE);
1544         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1545                                   ETH_HLEN);
1546
1547         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1548                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1549         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1550                        B44_PMASK_BASE + B44_PMASK_SIZE);
1551
1552         /* IPv6 magic packet pattern - pattern 2 */
1553         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1554         memset(pwol_mask, 0, B44_PMASK_SIZE);
1555         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1556                                   B44_ETHIPV6UDP_HLEN);
1557
1558         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1559                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1560         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1561                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1562
1563         kfree(pwol_pattern);
1564
1565         /* set these patterns' lengths: one less than each real length */
1566         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1567         bw32(bp, B44_WKUP_LEN, val);
1568
1569         /* enable wakeup pattern matching */
1570         val = br32(bp, B44_DEVCTRL);
1571         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1573 }
1574
1575 #ifdef CONFIG_B44_PCI
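/* On a PCI-hosted core, additionally arm PCI power-management wakeup:
 * set the core's PE bit in TMSLOW and the SSB_PE bit (presumably the
 * PME enable) in the host bridge's PMCSR config register.
 */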
1576 static void b44_setup_wol_pci(struct b44 *bp)
1577 {
1578         u16 val;
1579
1580         if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1581                 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1582                 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1583                 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1584         }
1585 }
1586 #else
1587 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1588 #endif /* CONFIG_B44_PCI */
1589
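/* Program Wake-on-LAN.  B0 and later cores have native magic-packet
 * matching (DEVCTRL_MPM); older revisions approximate it with the
 * three pseudo-magic filter patterns set up above.
 */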
1590 static void b44_setup_wol(struct b44 *bp)
1591 {
1592         u32 val;
1593
1594         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1595
1596         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1598                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1599
1600                 val = bp->dev->dev_addr[2] << 24 |
1601                         bp->dev->dev_addr[3] << 16 |
1602                         bp->dev->dev_addr[4] << 8 |
1603                         bp->dev->dev_addr[5];
1604                 bw32(bp, B44_ADDR_LO, val);
1605
1606                 val = bp->dev->dev_addr[0] << 8 |
1607                         bp->dev->dev_addr[1];
1608                 bw32(bp, B44_ADDR_HI, val);
1609
1610                 val = br32(bp, B44_DEVCTRL);
1611                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1613         } else {
1614                 b44_setup_pseudo_magicp(bp);
1615         }
1616         b44_setup_wol_pci(bp);
1617 }
1618
1619 static int b44_close(struct net_device *dev)
1620 {
1621         struct b44 *bp = netdev_priv(dev);
1622
1623         netif_stop_queue(dev);
1624
1625         napi_disable(&bp->napi);
1626
1627         del_timer_sync(&bp->timer);
1628
1629         spin_lock_irq(&bp->lock);
1630
1631         b44_halt(bp);
1632         b44_free_rings(bp);
1633         netif_carrier_off(dev);
1634
1635         spin_unlock_irq(&bp->lock);
1636
1637         free_irq(dev->irq, dev);
1638
1639         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1640                 b44_init_hw(bp, B44_PARTIAL_RESET);
1641                 b44_setup_wol(bp);
1642         }
1643
1644         b44_free_consistent(bp);
1645
1646         return 0;
1647 }
1648
1649 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1650 {
1651         struct b44 *bp = netdev_priv(dev);
1652         struct net_device_stats *nstat = &dev->stats;
1653         struct b44_hw_stats *hwstat = &bp->hw_stats;
1654
1655         /* Convert HW stats into netdevice stats. */
1656         nstat->rx_packets = hwstat->rx_pkts;
1657         nstat->tx_packets = hwstat->tx_pkts;
1658         nstat->rx_bytes   = hwstat->rx_octets;
1659         nstat->tx_bytes   = hwstat->tx_octets;
1660         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1661                              hwstat->tx_oversize_pkts +
1662                              hwstat->tx_underruns +
1663                              hwstat->tx_excessive_cols +
1664                              hwstat->tx_late_cols);
1665         nstat->multicast  = hwstat->rx_multicast_pkts; /* received multicasts */
1666         nstat->collisions = hwstat->tx_total_cols;
1667
1668         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1669                                    hwstat->rx_undersize);
1670         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1671         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1672         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1673         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1674                                    hwstat->rx_oversize_pkts +
1675                                    hwstat->rx_missed_pkts +
1676                                    hwstat->rx_crc_align_errs +
1677                                    hwstat->rx_undersize +
1678                                    hwstat->rx_crc_errs +
1679                                    hwstat->rx_align_errs +
1680                                    hwstat->rx_symbol_errs);
1681
1682         nstat->tx_aborted_errors = hwstat->tx_underruns;
1683 #if 0
1684         /* Carrier lost counter seems to be broken for some devices */
1685         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1686 #endif
1687
1688         return nstat;
1689 }
1690
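/* Load up to B44_MCAST_TABLE_SIZE multicast addresses into the CAM,
 * starting at entry 1 (entry 0 holds the unicast address), and return
 * the index of the first unused entry.
 */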
1691 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1692 {
1693         struct dev_mc_list *mclist;
1694         int i, num_ents;
1695
1696         num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1697         mclist = dev->mc_list;
1698         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1699                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1700         }
1701         return i + 1;
1702 }
1703
1704 static void __b44_set_rx_mode(struct net_device *dev)
1705 {
1706         struct b44 *bp = netdev_priv(dev);
1707         u32 val;
1708
1709         val = br32(bp, B44_RXCONFIG);
1710         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1711         if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1712                 val |= RXCONFIG_PROMISC;
1713                 bw32(bp, B44_RXCONFIG, val);
1714         } else {
1715                 unsigned char zero[ETH_ALEN] = { 0 };
1716                 int i = 1;
1717
1718                 __b44_set_mac_addr(bp);
1719
1720                 if ((dev->flags & IFF_ALLMULTI) ||
1721                     (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1722                         val |= RXCONFIG_ALLMULTI;
1723                 else
1724                         i = __b44_load_mcast(bp, dev);
1725
1726                 for (; i < 64; i++)
1727                         __b44_cam_write(bp, zero, i);
1728
1729                 bw32(bp, B44_RXCONFIG, val);
1730                 val = br32(bp, B44_CAM_CTRL);
1731                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1732         }
1733 }
1734
1735 static void b44_set_rx_mode(struct net_device *dev)
1736 {
1737         struct b44 *bp = netdev_priv(dev);
1738
1739         spin_lock_irq(&bp->lock);
1740         __b44_set_rx_mode(dev);
1741         spin_unlock_irq(&bp->lock);
1742 }
1743
1744 static u32 b44_get_msglevel(struct net_device *dev)
1745 {
1746         struct b44 *bp = netdev_priv(dev);
1747         return bp->msg_enable;
1748 }
1749
1750 static void b44_set_msglevel(struct net_device *dev, u32 value)
1751 {
1752         struct b44 *bp = netdev_priv(dev);
1753         bp->msg_enable = value;
1754 }
1755
1756 static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1757 {
1758         struct b44 *bp = netdev_priv(dev);
1759         struct ssb_bus *bus = bp->sdev->bus;
1760
1761         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1762         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1763         switch (bus->bustype) {
1764         case SSB_BUSTYPE_PCI:
1765                 strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1766                 break;
1767         case SSB_BUSTYPE_SSB:
1768                 strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1769                 break;
1770         case SSB_BUSTYPE_PCMCIA:
1771         case SSB_BUSTYPE_SDIO:
1772                 WARN_ON(1); /* A device with this bus does not exist. */
1773                 break;
1774         }
1775 }
1776
1777 static int b44_nway_reset(struct net_device *dev)
1778 {
1779         struct b44 *bp = netdev_priv(dev);
1780         u32 bmcr;
1781         int r;
1782
1783         spin_lock_irq(&bp->lock);
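        /* Read BMCR twice: the first read may return a stale latched
         * value on some PHYs, so only the second one is trusted.
         */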
1784         b44_readphy(bp, MII_BMCR, &bmcr);
1785         b44_readphy(bp, MII_BMCR, &bmcr);
1786         r = -EINVAL;
1787         if (bmcr & BMCR_ANENABLE) {
1788                 b44_writephy(bp, MII_BMCR,
1789                              bmcr | BMCR_ANRESTART);
1790                 r = 0;
1791         }
1792         spin_unlock_irq(&bp->lock);
1793
1794         return r;
1795 }
1796
1797 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1798 {
1799         struct b44 *bp = netdev_priv(dev);
1800
1801         cmd->supported = (SUPPORTED_Autoneg);
1802         cmd->supported |= (SUPPORTED_100baseT_Half |
1803                           SUPPORTED_100baseT_Full |
1804                           SUPPORTED_10baseT_Half |
1805                           SUPPORTED_10baseT_Full |
1806                           SUPPORTED_MII);
1807
1808         cmd->advertising = 0;
1809         if (bp->flags & B44_FLAG_ADV_10HALF)
1810                 cmd->advertising |= ADVERTISED_10baseT_Half;
1811         if (bp->flags & B44_FLAG_ADV_10FULL)
1812                 cmd->advertising |= ADVERTISED_10baseT_Full;
1813         if (bp->flags & B44_FLAG_ADV_100HALF)
1814                 cmd->advertising |= ADVERTISED_100baseT_Half;
1815         if (bp->flags & B44_FLAG_ADV_100FULL)
1816                 cmd->advertising |= ADVERTISED_100baseT_Full;
1817         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1818         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1819                 SPEED_100 : SPEED_10;
1820         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1821                 DUPLEX_FULL : DUPLEX_HALF;
1822         cmd->port = 0;
1823         cmd->phy_address = bp->phy_addr;
1824         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1825                 XCVR_INTERNAL : XCVR_EXTERNAL;
1826         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1827                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1828         if (cmd->autoneg == AUTONEG_ENABLE)
1829                 cmd->advertising |= ADVERTISED_Autoneg;
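        /* While the interface is down, link speed and duplex are
         * meaningless, so report them as unknown (0 / 0xff).
         */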
1830         if (!netif_running(dev)) {
1831                 cmd->speed = 0;
1832                 cmd->duplex = 0xff;
1833         }
1834         cmd->maxtxpkt = 0;
1835         cmd->maxrxpkt = 0;
1836         return 0;
1837 }
1838
1839 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1840 {
1841         struct b44 *bp = netdev_priv(dev);
1842
1843         /* We do not support gigabit. */
1844         if (cmd->autoneg == AUTONEG_ENABLE) {
1845                 if (cmd->advertising &
1846                     (ADVERTISED_1000baseT_Half |
1847                      ADVERTISED_1000baseT_Full))
1848                         return -EINVAL;
1849         } else if ((cmd->speed != SPEED_100 &&
1850                     cmd->speed != SPEED_10) ||
1851                    (cmd->duplex != DUPLEX_HALF &&
1852                     cmd->duplex != DUPLEX_FULL)) {
1853                 return -EINVAL;
1854         }
1855
1856         spin_lock_irq(&bp->lock);
1857
1858         if (cmd->autoneg == AUTONEG_ENABLE) {
1859                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1860                                B44_FLAG_100_BASE_T |
1861                                B44_FLAG_FULL_DUPLEX |
1862                                B44_FLAG_ADV_10HALF |
1863                                B44_FLAG_ADV_10FULL |
1864                                B44_FLAG_ADV_100HALF |
1865                                B44_FLAG_ADV_100FULL);
1866                 if (cmd->advertising == 0) {
1867                         bp->flags |= (B44_FLAG_ADV_10HALF |
1868                                       B44_FLAG_ADV_10FULL |
1869                                       B44_FLAG_ADV_100HALF |
1870                                       B44_FLAG_ADV_100FULL);
1871                 } else {
1872                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1873                                 bp->flags |= B44_FLAG_ADV_10HALF;
1874                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1875                                 bp->flags |= B44_FLAG_ADV_10FULL;
1876                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1877                                 bp->flags |= B44_FLAG_ADV_100HALF;
1878                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1879                                 bp->flags |= B44_FLAG_ADV_100FULL;
1880                 }
1881         } else {
1882                 bp->flags |= B44_FLAG_FORCE_LINK;
1883                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1884                 if (cmd->speed == SPEED_100)
1885                         bp->flags |= B44_FLAG_100_BASE_T;
1886                 if (cmd->duplex == DUPLEX_FULL)
1887                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1888         }
1889
1890         if (netif_running(dev))
1891                 b44_setup_phy(bp);
1892
1893         spin_unlock_irq(&bp->lock);
1894
1895         return 0;
1896 }
1897
1898 static void b44_get_ringparam(struct net_device *dev,
1899                               struct ethtool_ringparam *ering)
1900 {
1901         struct b44 *bp = netdev_priv(dev);
1902
1903         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1904         ering->rx_pending = bp->rx_pending;
1905         ering->tx_max_pending = B44_TX_RING_SIZE - 1;
1906         ering->tx_pending = bp->tx_pending;
1907 }
1908
1909 static int b44_set_ringparam(struct net_device *dev,
1910                              struct ethtool_ringparam *ering)
1911 {
1912         struct b44 *bp = netdev_priv(dev);
1913
1914         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1915             (ering->rx_mini_pending != 0) ||
1916             (ering->rx_jumbo_pending != 0) ||
1917             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1918                 return -EINVAL;
1919
1920         spin_lock_irq(&bp->lock);
1921
1922         bp->rx_pending = ering->rx_pending;
1923         bp->tx_pending = ering->tx_pending;
1924
1925         b44_halt(bp);
1926         b44_init_rings(bp);
1927         b44_init_hw(bp, B44_FULL_RESET);
1928         netif_wake_queue(bp->dev);
1929         spin_unlock_irq(&bp->lock);
1930
1931         b44_enable_ints(bp);
1932
1933         return 0;
1934 }
1935
1936 static void b44_get_pauseparam(struct net_device *dev,
1937                                 struct ethtool_pauseparam *epause)
1938 {
1939         struct b44 *bp = netdev_priv(dev);
1940
1941         epause->autoneg =
1942                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1943         epause->rx_pause =
1944                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1945         epause->tx_pause =
1946                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1947 }
1948
1949 static int b44_set_pauseparam(struct net_device *dev,
1950                                 struct ethtool_pauseparam *epause)
1951 {
1952         struct b44 *bp = netdev_priv(dev);
1953
1954         spin_lock_irq(&bp->lock);
1955         if (epause->autoneg)
1956                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1957         else
1958                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1959         if (epause->rx_pause)
1960                 bp->flags |= B44_FLAG_RX_PAUSE;
1961         else
1962                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1963         if (epause->tx_pause)
1964                 bp->flags |= B44_FLAG_TX_PAUSE;
1965         else
1966                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1967         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1968                 b44_halt(bp);
1969                 b44_init_rings(bp);
1970                 b44_init_hw(bp, B44_FULL_RESET);
1971         } else {
1972                 __b44_set_flow_ctrl(bp, bp->flags);
1973         }
1974         spin_unlock_irq(&bp->lock);
1975
1976         b44_enable_ints(bp);
1977
1978         return 0;
1979 }
1980
1981 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1982 {
1983         switch (stringset) {
1984         case ETH_SS_STATS:
1985                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1986                 break;
1987         }
1988 }
1989
1990 static int b44_get_sset_count(struct net_device *dev, int sset)
1991 {
1992         switch (sset) {
1993         case ETH_SS_STATS:
1994                 return ARRAY_SIZE(b44_gstrings);
1995         default:
1996                 return -EOPNOTSUPP;
1997         }
1998 }
1999
2000 static void b44_get_ethtool_stats(struct net_device *dev,
2001                                   struct ethtool_stats *stats, u64 *data)
2002 {
2003         struct b44 *bp = netdev_priv(dev);
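        /* The hw_stats counters form one contiguous array of u32s,
         * so they can be walked with a pointer starting at the first
         * counter (tx_good_octets).
         */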
2004         u32 *val = &bp->hw_stats.tx_good_octets;
2005         u32 i;
2006
2007         spin_lock_irq(&bp->lock);
2008
2009         b44_stats_update(bp);
2010
2011         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2012                 *data++ = *val++;
2013
2014         spin_unlock_irq(&bp->lock);
2015 }
2016
2017 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2018 {
2019         struct b44 *bp = netdev_priv(dev);
2020
2021         wol->supported = WAKE_MAGIC;
2022         if (bp->flags & B44_FLAG_WOL_ENABLE)
2023                 wol->wolopts = WAKE_MAGIC;
2024         else
2025                 wol->wolopts = 0;
2026         memset(&wol->sopass, 0, sizeof(wol->sopass));
2027 }
2028
2029 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2030 {
2031         struct b44 *bp = netdev_priv(dev);
2032
2033         spin_lock_irq(&bp->lock);
2034         if (wol->wolopts & WAKE_MAGIC)
2035                 bp->flags |= B44_FLAG_WOL_ENABLE;
2036         else
2037                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2038         spin_unlock_irq(&bp->lock);
2039
2040         return 0;
2041 }
2042
2043 static const struct ethtool_ops b44_ethtool_ops = {
2044         .get_drvinfo            = b44_get_drvinfo,
2045         .get_settings           = b44_get_settings,
2046         .set_settings           = b44_set_settings,
2047         .nway_reset             = b44_nway_reset,
2048         .get_link               = ethtool_op_get_link,
2049         .get_wol                = b44_get_wol,
2050         .set_wol                = b44_set_wol,
2051         .get_ringparam          = b44_get_ringparam,
2052         .set_ringparam          = b44_set_ringparam,
2053         .get_pauseparam         = b44_get_pauseparam,
2054         .set_pauseparam         = b44_set_pauseparam,
2055         .get_msglevel           = b44_get_msglevel,
2056         .set_msglevel           = b44_set_msglevel,
2057         .get_strings            = b44_get_strings,
2058         .get_sset_count         = b44_get_sset_count,
2059         .get_ethtool_stats      = b44_get_ethtool_stats,
2060 };
2061
2062 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2063 {
2064         struct mii_ioctl_data *data = if_mii(ifr);
2065         struct b44 *bp = netdev_priv(dev);
2066         int err = -EINVAL;
2067
2068         if (!netif_running(dev))
2069                 goto out;
2070
2071         spin_lock_irq(&bp->lock);
2072         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2073         spin_unlock_irq(&bp->lock);
2074 out:
2075         return err;
2076 }
2077
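/* Fetch board-specific parameters from the SPROM: the MAC and PHY
 * addresses come from the et1* fields for a second core on an SSB
 * bus, and from the et0* fields otherwise.
 */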
2078 static int __devinit b44_get_invariants(struct b44 *bp)
2079 {
2080         struct ssb_device *sdev = bp->sdev;
2081         int err = 0;
2082         u8 *addr;
2083
2084         bp->dma_offset = ssb_dma_translation(sdev);
2085
2086         if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2087             instance > 1) {
2088                 addr = sdev->bus->sprom.et1mac;
2089                 bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2090         } else {
2091                 addr = sdev->bus->sprom.et0mac;
2092                 bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2093         }
2094         /* Some ROMs have buggy PHY addresses with the high
2095          * bits set (sign extension?). Truncate them to a
2096          * valid PHY address. */
2097         bp->phy_addr &= 0x1F;
2098
2099         memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2100
2101         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2102                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2103                 return -EINVAL;
2104         }
2105
2106         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2107
2108         bp->imask = IMASK_DEF;
2109
2110         /* XXX - really required?
2111            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2112         */
2113
2114         if (bp->sdev->id.revision >= 7)
2115                 bp->flags |= B44_FLAG_B0_ANDLATER;
2116
2117         return err;
2118 }
2119
2120 static const struct net_device_ops b44_netdev_ops = {
2121         .ndo_open               = b44_open,
2122         .ndo_stop               = b44_close,
2123         .ndo_start_xmit         = b44_start_xmit,
2124         .ndo_get_stats          = b44_get_stats,
2125         .ndo_set_multicast_list = b44_set_rx_mode,
2126         .ndo_set_mac_address    = b44_set_mac_addr,
2127         .ndo_validate_addr      = eth_validate_addr,
2128         .ndo_do_ioctl           = b44_ioctl,
2129         .ndo_tx_timeout         = b44_tx_timeout,
2130         .ndo_change_mtu         = b44_change_mtu,
2131 #ifdef CONFIG_NET_POLL_CONTROLLER
2132         .ndo_poll_controller    = b44_poll_controller,
2133 #endif
2134 };
2135
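/* Probe one SSB core: allocate the etherdev, power up the bus,
 * constrain DMA to the core's 30-bit address window, fetch the
 * invariants and register the netdevice.
 */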
2136 static int __devinit b44_init_one(struct ssb_device *sdev,
2137                                   const struct ssb_device_id *ent)
2138 {
2139         static int b44_version_printed = 0;
2140         struct net_device *dev;
2141         struct b44 *bp;
2142         int err;
2143
2144         instance++;
2145
2146         if (b44_version_printed++ == 0)
2147                 printk(KERN_INFO "%s", version);
2148
2150         dev = alloc_etherdev(sizeof(*bp));
2151         if (!dev) {
2152                 dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
2153                 err = -ENOMEM;
2154                 goto out;
2155         }
2156
2157         SET_NETDEV_DEV(dev, sdev->dev);
2158
2159         /* No interesting netdevice features in this card... */
2160         dev->features |= 0;
2161
2162         bp = netdev_priv(dev);
2163         bp->sdev = sdev;
2164         bp->dev = dev;
2165         bp->force_copybreak = 0;
2166
2167         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2168
2169         spin_lock_init(&bp->lock);
2170
2171         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2172         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2173
2174         dev->netdev_ops = &b44_netdev_ops;
2175         netif_napi_add(dev, &bp->napi, b44_poll, 64);
2176         dev->watchdog_timeo = B44_TX_TIMEOUT;
2177         dev->irq = sdev->irq;
2178         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2179
2180         netif_carrier_off(dev);
2181
2182         err = ssb_bus_powerup(sdev->bus, 0);
2183         if (err) {
2184                 dev_err(sdev->dev,
2185                         "Failed to powerup the bus\n");
2186                 goto err_out_free_dev;
2187         }
2188         err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
2189         if (err) {
2190                 dev_err(sdev->dev,
2191                         "Required 30-bit DMA mask unsupported by the system.\n");
2192                 goto err_out_powerdown;
2193         }
2194         err = b44_get_invariants(bp);
2195         if (err) {
2196                 dev_err(sdev->dev,
2197                         "Problem fetching invariants of chip, aborting.\n");
2198                 goto err_out_powerdown;
2199         }
2200
2201         bp->mii_if.dev = dev;
2202         bp->mii_if.mdio_read = b44_mii_read;
2203         bp->mii_if.mdio_write = b44_mii_write;
2204         bp->mii_if.phy_id = bp->phy_addr;
2205         bp->mii_if.phy_id_mask = 0x1f;
2206         bp->mii_if.reg_num_mask = 0x1f;
2207
2208         /* By default, advertise all speed/duplex settings. */
2209         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2210                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2211
2212         /* By default, auto-negotiate PAUSE. */
2213         bp->flags |= B44_FLAG_PAUSE_AUTO;
2214
2215         err = register_netdev(dev);
2216         if (err) {
2217                 dev_err(sdev->dev, "Cannot register net device, aborting.\n");
2218                 goto err_out_powerdown;
2219         }
2220
2221         ssb_set_drvdata(sdev, dev);
2222
2223         /* Chip reset provides power to the b44 MAC & PCI cores, which
2224          * is necessary for MAC register access.
2225          */
2226         b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2227
2228         printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
2229                dev->name, dev->dev_addr);
2230
2231         return 0;
2232
2233 err_out_powerdown:
2234         ssb_bus_may_powerdown(sdev->bus);
2235
2236 err_out_free_dev:
2237         free_netdev(dev);
2238
2239 out:
2240         return err;
2241 }
2242
2243 static void __devexit b44_remove_one(struct ssb_device *sdev)
2244 {
2245         struct net_device *dev = ssb_get_drvdata(sdev);
2246
2247         unregister_netdev(dev);
2248         ssb_device_disable(sdev, 0);
2249         ssb_bus_may_powerdown(sdev->bus);
2250         free_netdev(dev);
2251         ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2252         ssb_set_drvdata(sdev, NULL);
2253 }
2254
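/* Suspend: halt the chip and detach the device.  If Wake-on-LAN is
 * enabled, re-init just enough hardware (a partial reset) to arm the
 * wakeup logic before dropping to D3hot.
 */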
2255 static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2256 {
2257         struct net_device *dev = ssb_get_drvdata(sdev);
2258         struct b44 *bp = netdev_priv(dev);
2259
2260         if (!netif_running(dev))
2261                 return 0;
2262
2263         del_timer_sync(&bp->timer);
2264
2265         spin_lock_irq(&bp->lock);
2266
2267         b44_halt(bp);
2268         netif_carrier_off(bp->dev);
2269         netif_device_detach(bp->dev);
2270         b44_free_rings(bp);
2271
2272         spin_unlock_irq(&bp->lock);
2273
2274         free_irq(dev->irq, dev);
2275         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2276                 b44_init_hw(bp, B44_PARTIAL_RESET);
2277                 b44_setup_wol(bp);
2278         }
2279
2280         ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2281         return 0;
2282 }
2283
2284 static int b44_resume(struct ssb_device *sdev)
2285 {
2286         struct net_device *dev = ssb_get_drvdata(sdev);
2287         struct b44 *bp = netdev_priv(dev);
2288         int rc = 0;
2289
2290         rc = ssb_bus_powerup(sdev->bus, 0);
2291         if (rc) {
2292                 dev_err(sdev->dev,
2293                         "Failed to powerup the bus\n");
2294                 return rc;
2295         }
2296
2297         if (!netif_running(dev))
2298                 return 0;
2299
2300         rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2301         if (rc) {
2302                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2303                 return rc;
2304         }
2305
2306         spin_lock_irq(&bp->lock);
2307
2308         b44_init_rings(bp);
2309         b44_init_hw(bp, B44_FULL_RESET);
2310         netif_device_attach(bp->dev);
2311         spin_unlock_irq(&bp->lock);
2312
2313         b44_enable_ints(bp);
2314         netif_wake_queue(dev);
2315
2316         mod_timer(&bp->timer, jiffies + 1);
2317
2318         return 0;
2319 }
2320
2321 static struct ssb_driver b44_ssb_driver = {
2322         .name           = DRV_MODULE_NAME,
2323         .id_table       = b44_ssb_tbl,
2324         .probe          = b44_init_one,
2325         .remove         = __devexit_p(b44_remove_one),
2326         .suspend        = b44_suspend,
2327         .resume         = b44_resume,
2328 };
2329
2330 static inline int b44_pci_init(void)
2331 {
2332         int err = 0;
2333 #ifdef CONFIG_B44_PCI
2334         err = ssb_pcihost_register(&b44_pci_driver);
2335 #endif
2336         return err;
2337 }
2338
2339 static inline void b44_pci_exit(void)
2340 {
2341 #ifdef CONFIG_B44_PCI
2342         ssb_pcihost_unregister(&b44_pci_driver);
2343 #endif
2344 }
2345
2346 static int __init b44_init(void)
2347 {
2348         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2349         int err;
2350
2351         /* Set up parameters for syncing RX/TX DMA descriptors */
2352         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2353         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
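        /* Example: a 32-byte cache line gives dma_desc_align_mask =
         * ~31 = 0xffffffe0 and dma_desc_sync_size =
         * max(32, sizeof(struct dma_desc)).
         */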
2354
2355         err = b44_pci_init();
2356         if (err)
2357                 return err;
2358         err = ssb_driver_register(&b44_ssb_driver);
2359         if (err)
2360                 b44_pci_exit();
2361         return err;
2362 }
2363
2364 static void __exit b44_cleanup(void)
2365 {
2366         ssb_driver_unregister(&b44_ssb_driver);
2367         b44_pci_exit();
2368 }
2369
2370 module_init(b44_init);
2371 module_exit(b44_cleanup);
2372