X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fnet%2Fsunqe.c;h=e811331d4608a86abc8514848cdacddf73b83123;hb=6e01d1a4b2f7110201e7fe16e561a721d76fab3e;hp=1f2323be60d44648ea565e4edfe449af53155b66;hpb=10158286e7b5347dce2285895c95419b9f6f8b63;p=safe%2Fjmp%2Flinux-2.6

diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 1f2323b..e811331 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -1,10 +1,9 @@
-/* $Id: sunqe.c,v 1.55 2002/01/15 06:48:55 davem Exp $
- * sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
+/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
  * Once again I am out to prove that every ethernet
  * controller out there can be most efficiently programmed
  * if you make it look like a LANCE.
  *
- * Copyright (C) 1996, 1999, 2003 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1999, 2003, 2006 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/module.h>
@@ -41,9 +40,9 @@
 #include "sunqe.h"
 
 #define DRV_NAME	"sunqe"
-#define DRV_VERSION	"3.0"
-#define DRV_RELDATE	"8/24/03"
-#define DRV_AUTHOR	"David S. Miller (davem@redhat.com)"
+#define DRV_VERSION	"4.0"
+#define DRV_RELDATE	"June 23, 2006"
+#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"
 
 static char version[] =
 	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
@@ -261,31 +260,31 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
 
 	if (qe_status & CREG_STAT_EDEFER) {
 		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 	}
 
 	if (qe_status & CREG_STAT_CLOSS) {
 		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_carrier_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_carrier_errors++;
 	}
 
 	if (qe_status & CREG_STAT_ERETRIES) {
 		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_LCOLL) {
 		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.collisions++;
+		dev->stats.tx_errors++;
+		dev->stats.collisions++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_FUFLOW) {
 		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
@@ -298,104 +297,104 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
 	}
 
 	if (qe_status & CREG_STAT_CCOFLOW) {
-		qep->net_stats.tx_errors += 256;
-		qep->net_stats.collisions += 256;
+		dev->stats.tx_errors += 256;
+		dev->stats.collisions += 256;
 	}
 
 	if (qe_status & CREG_STAT_TXDERROR) {
 		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_aborted_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
	if (qe_status & CREG_STAT_TXLERR) {
 		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_TXPERR) {
 		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_aborted_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_TXSERR) {
 		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_aborted_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_RCCOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.collisions += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.collisions += 256;
 	}
 
 	if (qe_status & CREG_STAT_RUOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_over_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_over_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_MCOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_missed_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_missed_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_RXFOFLOW) {
 		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_over_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_over_errors++;
 	}
 
 	if (qe_status & CREG_STAT_RLCOLL) {
 		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.collisions++;
+		dev->stats.rx_errors++;
+		dev->stats.collisions++;
 	}
 
 	if (qe_status & CREG_STAT_FCOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_frame_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_frame_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_CECOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_crc_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_crc_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_RXDROP) {
 		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_dropped++;
-		qep->net_stats.rx_missed_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_dropped++;
+		dev->stats.rx_missed_errors++;
 	}
 
 	if (qe_status & CREG_STAT_RXSMALL) {
 		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_length_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_length_errors++;
 	}
 
 	if (qe_status & CREG_STAT_RXLERR) {
 		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
-		qep->net_stats.rx_errors++;
+		dev->stats.rx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_RXPERR) {
 		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_missed_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_missed_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_RXSERR) {
 		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_missed_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_missed_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
@@ -410,6 +409,7 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
 static void qe_rx(struct sunqe *qep)
 {
 	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
+	struct net_device *dev = qep->dev;
 	struct qe_rxd *this;
 	struct sunqe_buffers *qbufs = qep->buffers;
 	__u32 qbufs_dvma = qep->buffers_dvma;
@@ -429,30 +429,29 @@ static void qe_rx(struct sunqe *qep)
 
 		/* Check for errors. */
 		if (len < ETH_ZLEN) {
-			qep->net_stats.rx_errors++;
-			qep->net_stats.rx_length_errors++;
-			qep->net_stats.rx_dropped++;
+			dev->stats.rx_errors++;
+			dev->stats.rx_length_errors++;
+			dev->stats.rx_dropped++;
 		} else {
 			skb = dev_alloc_skb(len + 2);
 			if (skb == NULL) {
 				drops++;
-				qep->net_stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 			} else {
-				skb->dev = qep->dev;
 				skb_reserve(skb, 2);
 				skb_put(skb, len);
-				eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
-						 len, 0);
+				skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+						 len);
 				skb->protocol = eth_type_trans(skb, qep->dev);
 				netif_rx(skb);
 				qep->dev->last_rx = jiffies;
-				qep->net_stats.rx_packets++;
-				qep->net_stats.rx_bytes += len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += len;
 			}
 		}
 		end_rxd->rx_addr = this_qbuf_dvma;
 		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
-		
+
 		elem = NEXT_RX(elem);
 		this = &rxbase[elem];
 	}
@@ -467,9 +466,9 @@ static void qe_tx_reclaim(struct sunqe *qep);
  * so we just run through each qe and check to see who is signaling
  * and thus needs to be serviced.
  */
-static irqreturn_t qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t qec_interrupt(int irq, void *dev_id)
 {
-	struct sunqec *qecp = (struct sunqec *) dev_id;
+	struct sunqec *qecp = dev_id;
 	u32 qec_status;
 	int channel = 0;
 
@@ -594,7 +593,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Avoid a race... */
 	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
 
-	memcpy(txbuf, skb->data, len);
+	skb_copy_from_linear_data(skb, txbuf, len);
 
 	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
 	qep->qe_block->qe_txd[entry].tx_flags =
@@ -605,8 +604,8 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
 
-	qep->net_stats.tx_packets++;
-	qep->net_stats.tx_bytes += len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += len;
 
 	if (TX_BUFFS_AVAIL(qep) <= 0) {
 		/* Halt the net queue and enable tx interrupts.
@@ -624,13 +623,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
-static struct net_device_stats *qe_get_stats(struct net_device *dev)
-{
-	struct sunqe *qep = (struct sunqe *) dev->priv;
-
-	return &qep->net_stats;
-}
-
 static void qe_set_multicast(struct net_device *dev)
 {
 	struct sunqe *qep = (struct sunqe *) dev->priv;
@@ -719,7 +711,7 @@ static u32 qe_get_link(struct net_device *dev)
 	return (phyconfig & MREGS_PHYCONFIG_LSTAT);
 }
 
-static struct ethtool_ops qe_ethtool_ops = {
+static const struct ethtool_ops qe_ethtool_ops = {
 	.get_drvinfo		= qe_get_drvinfo,
 	.get_link		= qe_get_link,
 };
@@ -755,298 +747,269 @@ static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
 		    qecp->gregs + GLOB_RSIZE);
 }
 
-/* Four QE's per QEC card. */
-static int __init qec_ether_init(struct net_device *dev, struct sbus_dev *sdev)
+static u8 __devinit qec_get_burst(struct device_node *dp)
 {
-	static unsigned version_printed;
-	struct net_device *qe_devs[4];
-	struct sunqe *qeps[4];
-	struct sbus_dev *qesdevs[4];
-	struct sbus_dev *child;
-	struct sunqec *qecp = NULL;
 	u8 bsizes, bsizes_more;
-	int i, j, res = -ENOMEM;
 
-	for (i = 0; i < 4; i++) {
-		qe_devs[i] = alloc_etherdev(sizeof(struct sunqe));
-		if (!qe_devs[i])
-			goto out;
-	}
+	/* Find and set the burst sizes for the QEC, since it
+	 * does the actual dma for all 4 channels.
+	 */
+	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
+	bsizes &= 0xff;
+	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
 
-	if (version_printed++ == 0)
-		printk(KERN_INFO "%s", version);
+	if (bsizes_more != 0xff)
+		bsizes &= bsizes_more;
+	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
+	    (bsizes & DMA_BURST32)==0)
+		bsizes = (DMA_BURST32 - 1);
 
-	for (i = 0; i < 4; i++) {
-		qeps[i] = (struct sunqe *) qe_devs[i]->priv;
-		for (j = 0; j < 6; j++)
-			qe_devs[i]->dev_addr[j] = idprom->id_ethaddr[j];
-		qeps[i]->channel = i;
-		spin_lock_init(&qeps[i]->lock);
-	}
+	return bsizes;
+}
 
-	qecp = kmalloc(sizeof(struct sunqec), GFP_KERNEL);
-	if (qecp == NULL)
-		goto out1;
-	qecp->qec_sdev = sdev;
+static struct sunqec * __devinit get_qec(struct sbus_dev *child_sdev)
+{
+	struct sbus_dev *qec_sdev = child_sdev->parent;
+	struct sunqec *qecp;
 
-	for (i = 0; i < 4; i++) {
-		qecp->qes[i] = qeps[i];
-		qeps[i]->dev = qe_devs[i];
-		qeps[i]->parent = qecp;
+	for (qecp = root_qec_dev; qecp; qecp = qecp->next_module) {
+		if (qecp->qec_sdev == qec_sdev)
+			break;
 	}
+	if (!qecp) {
+		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
+		if (qecp) {
+			u32 ctrl;
+
+			qecp->qec_sdev = qec_sdev;
+			qecp->gregs = sbus_ioremap(&qec_sdev->resource[0], 0,
+						   GLOB_REG_SIZE,
+						   "QEC Global Registers");
+			if (!qecp->gregs)
+				goto fail;
+
+			/* Make sure the QEC is in MACE mode. */
+			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
+			ctrl &= 0xf0000000;
+			if (ctrl != GLOB_CTRL_MMODE) {
+				printk(KERN_ERR "qec: Not in MACE mode!\n");
+				goto fail;
+			}
 
-	res = -ENODEV;
+			if (qec_global_reset(qecp->gregs))
+				goto fail;
 
-	for (i = 0, child = sdev->child; i < 4; i++, child = child->next) {
-		/* Link in channel */
-		j = prom_getintdefault(child->prom_node, "channel#", -1);
-		if (j == -1)
-			goto out2;
-		qesdevs[j] = child;
-	}
+			qecp->qec_bursts = qec_get_burst(qec_sdev->ofdev.node);
 
-	for (i = 0; i < 4; i++)
-		qeps[i]->qe_sdev = qesdevs[i];
+			qec_init_once(qecp, qec_sdev);
 
-	/* Now map in the registers, QEC globals first. */
-	qecp->gregs = sbus_ioremap(&sdev->resource[0], 0,
-				   GLOB_REG_SIZE, "QEC Global Registers");
-	if (!qecp->gregs) {
-		printk(KERN_ERR "QuadEther: Cannot map QEC global registers.\n");
-		goto out2;
-	}
+			if (request_irq(qec_sdev->irqs[0], &qec_interrupt,
+					IRQF_SHARED, "qec", (void *) qecp)) {
+				printk(KERN_ERR "qec: Can't register irq.\n");
+				goto fail;
+			}
 
-	/* Make sure the QEC is in MACE mode. */
-	if ((sbus_readl(qecp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_MMODE) {
-		printk(KERN_ERR "QuadEther: AIEEE, QEC is not in MACE mode!\n");
-		goto out3;
+			qecp->next_module = root_qec_dev;
+			root_qec_dev = qecp;
+		}
 	}
 
-	/* Reset the QEC. */
-	if (qec_global_reset(qecp->gregs))
-		goto out3;
+	return qecp;
 
-	/* Find and set the burst sizes for the QEC, since it does
-	 * the actual dma for all 4 channels.
-	 */
-	bsizes = prom_getintdefault(sdev->prom_node, "burst-sizes", 0xff);
-	bsizes &= 0xff;
-	bsizes_more = prom_getintdefault(sdev->bus->prom_node, "burst-sizes", 0xff);
+fail:
+	if (qecp->gregs)
+		sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
+	kfree(qecp);
+	return NULL;
+}
 
-	if (bsizes_more != 0xff)
-		bsizes &= bsizes_more;
-	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
-	    (bsizes & DMA_BURST32)==0)
-		bsizes = (DMA_BURST32 - 1);
+static int __devinit qec_ether_init(struct sbus_dev *sdev)
+{
+	static unsigned version_printed;
+	struct net_device *dev;
+	struct sunqe *qe;
+	struct sunqec *qecp;
+	int i, res;
 
-	qecp->qec_bursts = bsizes;
+	if (version_printed++ == 0)
+		printk(KERN_INFO "%s", version);
 
-	/* Perform one time QEC initialization, we never touch the QEC
-	 * globals again after this.
-	 */
-	qec_init_once(qecp, sdev);
-
-	for (i = 0; i < 4; i++) {
-		struct sunqe *qe = qeps[i];
-		/* Map in QEC per-channel control registers. */
-		qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
-					  CREG_REG_SIZE, "QEC Channel Registers");
-		if (!qe->qcregs) {
-			printk(KERN_ERR "QuadEther: Cannot map QE %d's channel registers.\n", i);
-			goto out4;
-		}
+	dev = alloc_etherdev(sizeof(struct sunqe));
+	if (!dev)
+		return -ENOMEM;
 
-		/* Map in per-channel AMD MACE registers. */
-		qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
-					 MREGS_REG_SIZE, "QE MACE Registers");
-		if (!qe->mregs) {
-			printk(KERN_ERR "QuadEther: Cannot map QE %d's MACE registers.\n", i);
-			goto out4;
-		}
+	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
 
-		qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
-						     PAGE_SIZE,
-						     &qe->qblock_dvma);
-		qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
-						    sizeof(struct sunqe_buffers),
-						    &qe->buffers_dvma);
-		if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
-		    qe->buffers == NULL || qe->buffers_dvma == 0) {
-			goto out4;
-		}
+	qe = netdev_priv(dev);
 
-		/* Stop this QE. */
-		qe_stop(qe);
+	i = of_getintprop_default(sdev->ofdev.node, "channel#", -1);
+	if (i == -1) {
+		struct sbus_dev *td = sdev->parent->child;
+		i = 0;
+		while (td != sdev) {
+			td = td->next;
+			i++;
+		}
 	}
+	qe->channel = i;
+	spin_lock_init(&qe->lock);
 
-	for (i = 0; i < 4; i++) {
-		SET_MODULE_OWNER(qe_devs[i]);
-		qe_devs[i]->open = qe_open;
-		qe_devs[i]->stop = qe_close;
-		qe_devs[i]->hard_start_xmit = qe_start_xmit;
-		qe_devs[i]->get_stats = qe_get_stats;
-		qe_devs[i]->set_multicast_list = qe_set_multicast;
-		qe_devs[i]->tx_timeout = qe_tx_timeout;
-		qe_devs[i]->watchdog_timeo = 5*HZ;
-		qe_devs[i]->irq = sdev->irqs[0];
-		qe_devs[i]->dma = 0;
-		qe_devs[i]->ethtool_ops = &qe_ethtool_ops;
-	}
+	res = -ENODEV;
+	qecp = get_qec(sdev);
+	if (!qecp)
+		goto fail;
 
-	/* QEC receives interrupts from each QE, then it sends the actual
-	 * IRQ to the cpu itself.  Since QEC is the single point of
-	 * interrupt for all QE channels we register the IRQ handler
-	 * for it now.
-	 */
-	if (request_irq(sdev->irqs[0], &qec_interrupt,
-			SA_SHIRQ, "QuadEther", (void *) qecp)) {
-		printk(KERN_ERR "QuadEther: Can't register QEC master irq handler.\n");
-		res = -EAGAIN;
-		goto out4;
-	}
+	qecp->qes[qe->channel] = qe;
+	qe->dev = dev;
+	qe->parent = qecp;
+	qe->qe_sdev = sdev;
 
-	for (i = 0; i < 4; i++) {
-		if (register_netdev(qe_devs[i]) != 0)
-			goto out5;
+	res = -ENOMEM;
+	qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
+				  CREG_REG_SIZE, "QEC Channel Registers");
+	if (!qe->qcregs) {
+		printk(KERN_ERR "qe: Cannot map channel registers.\n");
+		goto fail;
 	}
 
-	/* Report the QE channels. */
-	for (i = 0; i < 4; i++) {
-		printk(KERN_INFO "%s: QuadEthernet channel[%d] ", qe_devs[i]->name, i);
-		for (j = 0; j < 6; j++)
-			printk ("%2.2x%c",
-				qe_devs[i]->dev_addr[j],
-				j == 5 ? ' ': ':');
-		printk("\n");
+	qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
+				 MREGS_REG_SIZE, "QE MACE Registers");
+	if (!qe->mregs) {
+		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
+		goto fail;
 	}
 
-	/* We are home free at this point, link the qe's into
-	 * the master list for later driver exit.
-	 */
-	qecp->next_module = root_qec_dev;
-	root_qec_dev = qecp;
+	qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
+					     PAGE_SIZE,
+					     &qe->qblock_dvma);
+	qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
+					    sizeof(struct sunqe_buffers),
+					    &qe->buffers_dvma);
+	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
+	    qe->buffers == NULL || qe->buffers_dvma == 0)
+		goto fail;
+
+	/* Stop this QE. */
+	qe_stop(qe);
+
+	SET_NETDEV_DEV(dev, &sdev->ofdev.dev);
+
+	dev->open = qe_open;
+	dev->stop = qe_close;
+	dev->hard_start_xmit = qe_start_xmit;
+	dev->set_multicast_list = qe_set_multicast;
+	dev->tx_timeout = qe_tx_timeout;
+	dev->watchdog_timeo = 5*HZ;
+	dev->irq = sdev->irqs[0];
+	dev->dma = 0;
+	dev->ethtool_ops = &qe_ethtool_ops;
+
+	res = register_netdev(dev);
+	if (res)
+		goto fail;
+
+	dev_set_drvdata(&sdev->ofdev.dev, qe);
+
+	printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
+	for (i = 0; i < 6; i++)
+		printk ("%2.2x%c",
+			dev->dev_addr[i],
+			i == 5 ? ' ': ':');
+	printk("\n");
+
 	return 0;
 
-out5:
-	while (i--)
-		unregister_netdev(qe_devs[i]);
-	free_irq(sdev->irqs[0], (void *)qecp);
-out4:
-	for (i = 0; i < 4; i++) {
-		struct sunqe *qe = (struct sunqe *)qe_devs[i]->priv;
-
-		if (qe->qcregs)
-			sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
-		if (qe->mregs)
-			sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
-		if (qe->qe_block)
-			sbus_free_consistent(qe->qe_sdev,
-					     PAGE_SIZE,
-					     qe->qe_block,
-					     qe->qblock_dvma);
-		if (qe->buffers)
-			sbus_free_consistent(qe->qe_sdev,
-					     sizeof(struct sunqe_buffers),
-					     qe->buffers,
-					     qe->buffers_dvma);
-	}
-out3:
-	sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
-out2:
-	kfree(qecp);
-out1:
-	i = 4;
-out:
-	while (i--)
-		free_netdev(qe_devs[i]);
+fail:
+	if (qe->qcregs)
+		sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
+	if (qe->mregs)
+		sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
+	if (qe->qe_block)
+		sbus_free_consistent(qe->qe_sdev,
+				     PAGE_SIZE,
+				     qe->qe_block,
+				     qe->qblock_dvma);
+	if (qe->buffers)
+		sbus_free_consistent(qe->qe_sdev,
+				     sizeof(struct sunqe_buffers),
+				     qe->buffers,
+				     qe->buffers_dvma);
+
+	free_netdev(dev);
+
 	return res;
 }
 
-static int __init qec_match(struct sbus_dev *sdev)
+static int __devinit qec_sbus_probe(struct of_device *dev, const struct of_device_id *match)
 {
-	struct sbus_dev *sibling;
-	int i;
-
-	if (strcmp(sdev->prom_name, "qec") != 0)
-		return 0;
+	struct sbus_dev *sdev = to_sbus_device(&dev->dev);
 
-	/* QEC can be parent of either QuadEthernet or BigMAC
-	 * children.  Do not confuse this with qfe/SUNW,qfe
-	 * which is a quad-happymeal card and handled by
-	 * a different driver.
-	 */
-	sibling = sdev->child;
-	for (i = 0; i < 4; i++) {
-		if (sibling == NULL)
-			return 0;
-		if (strcmp(sibling->prom_name, "qe") != 0)
-			return 0;
-		sibling = sibling->next;
-	}
-	return 1;
+	return qec_ether_init(sdev);
 }
 
-static int __init qec_probe(void)
+static int __devexit qec_sbus_remove(struct of_device *dev)
 {
-	struct net_device *dev = NULL;
-	struct sbus_bus *bus;
-	struct sbus_dev *sdev = NULL;
-	static int called;
-	int cards = 0, v;
-
-	root_qec_dev = NULL;
-
-	if (called)
-		return -ENODEV;
-	called++;
-
-	for_each_sbus(bus) {
-		for_each_sbusdev(sdev, bus) {
-			if (cards)
-				dev = NULL;
-
-			if (qec_match(sdev)) {
-				cards++;
-				if ((v = qec_ether_init(dev, sdev)))
-					return v;
-			}
-		}
-	}
-	if (!cards)
-		return -ENODEV;
+	struct sunqe *qp = dev_get_drvdata(&dev->dev);
+	struct net_device *net_dev = qp->dev;
+
+	unregister_netdev(net_dev);
+
+	sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
+	sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
+	sbus_free_consistent(qp->qe_sdev,
+			     PAGE_SIZE,
+			     qp->qe_block,
+			     qp->qblock_dvma);
+	sbus_free_consistent(qp->qe_sdev,
+			     sizeof(struct sunqe_buffers),
+			     qp->buffers,
+			     qp->buffers_dvma);
+
+	free_netdev(net_dev);
+
+	dev_set_drvdata(&dev->dev, NULL);
+
 	return 0;
 }
 
-static void __exit qec_cleanup(void)
+static struct of_device_id qec_sbus_match[] = {
+	{
+		.name = "qe",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, qec_sbus_match);
+
+static struct of_platform_driver qec_sbus_driver = {
+	.name		= "qec",
+	.match_table	= qec_sbus_match,
+	.probe		= qec_sbus_probe,
+	.remove		= __devexit_p(qec_sbus_remove),
+};
+
+static int __init qec_init(void)
+{
+	return of_register_driver(&qec_sbus_driver, &sbus_bus_type);
+}
+
+static void __exit qec_exit(void)
 {
-	struct sunqec *next_qec;
-	int i;
+	of_unregister_driver(&qec_sbus_driver);
 
 	while (root_qec_dev) {
-		next_qec = root_qec_dev->next_module;
-
-		/* Release all four QE channels, then the QEC itself. */
-		for (i = 0; i < 4; i++) {
-			unregister_netdev(root_qec_dev->qes[i]->dev);
-			sbus_iounmap(root_qec_dev->qes[i]->qcregs, CREG_REG_SIZE);
-			sbus_iounmap(root_qec_dev->qes[i]->mregs, MREGS_REG_SIZE);
-			sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
-					     PAGE_SIZE,
-					     root_qec_dev->qes[i]->qe_block,
-					     root_qec_dev->qes[i]->qblock_dvma);
-			sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
-					     sizeof(struct sunqe_buffers),
-					     root_qec_dev->qes[i]->buffers,
-					     root_qec_dev->qes[i]->buffers_dvma);
-			free_netdev(root_qec_dev->qes[i]->dev);
-		}
-		free_irq(root_qec_dev->qec_sdev->irqs[0], (void *)root_qec_dev);
+		struct sunqec *next = root_qec_dev->next_module;
+
+		free_irq(root_qec_dev->qec_sdev->irqs[0],
+			 (void *) root_qec_dev);
 		sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);
+
 		kfree(root_qec_dev);
-		root_qec_dev = next_qec;
+
+		root_qec_dev = next;
 	}
 }
 
-module_init(qec_probe);
-module_exit(qec_cleanup);
+module_init(qec_init);
+module_exit(qec_exit);