Merge master.kernel.org:/home/rmk/linux-2.6-arm
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 3145ca1..f4dfd1f 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -56,14 +56,6 @@ static char *sgiseeqstr = "SGI Seeq8003";
                                  (dma_addr_t)((unsigned long)(v) -            \
                                               (unsigned long)((sp)->rx_desc)))
 
-#define DMA_SYNC_DESC_CPU(dev, addr) \
-       do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
-            sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE); } while (0)
-
-#define DMA_SYNC_DESC_DEV(dev, addr) \
-       do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
-            sizeof(struct sgiseeq_rx_desc), DMA_TO_DEVICE); } while (0)
-
 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
  * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
  */
@@ -116,6 +108,18 @@ struct sgiseeq_private {
        spinlock_t tx_lock;
 };
 
+static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
+{
+       dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+                      DMA_FROM_DEVICE);
+}
+
+static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
+{
+       dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+                      DMA_TO_DEVICE);
+}
+
 static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
 {
        hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
@@ -184,12 +188,12 @@ static int seeq_init_ring(struct net_device *dev)
        /* Setup tx ring. */
        for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
                sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
-               DMA_SYNC_DESC_DEV(dev, &sp->tx_desc[i]);
+               dma_sync_desc_dev(dev, &sp->tx_desc[i]);
        }
 
        /* And now the rx ring. */
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
-               if (!sp->rx_desc[i].rdma.pbuf) {
+               if (!sp->rx_desc[i].skb) {
                        dma_addr_t dma_addr;
                        struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 
@@ -203,10 +207,10 @@ static int seeq_init_ring(struct net_device *dev)
                        sp->rx_desc[i].rdma.pbuf = dma_addr;
                }
                sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
-               DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i]);
+               dma_sync_desc_dev(dev, &sp->rx_desc[i]);
        }
        sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
-       DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i - 1]);
+       dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
        return 0;
 }
 
@@ -341,7 +345,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
 
        /* Service every received packet. */
        rd = &sp->rx_desc[sp->rx_new];
-       DMA_SYNC_DESC_CPU(dev, rd);
+       dma_sync_desc_cpu(dev, rd);
        while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
                len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
                dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
@@ -373,7 +377,6 @@ memory_squeeze:
                                        skb_put(skb, len);
                                        skb->protocol = eth_type_trans(skb, dev);
                                        netif_rx(skb);
-                                       dev->last_rx = jiffies;
                                        dev->stats.rx_packets++;
                                        dev->stats.rx_bytes += len;
                                } else {
@@ -397,16 +400,16 @@ memory_squeeze:
                /* Return the entry to the ring pool. */
                rd->rdma.cntinfo = RCNTINFO_INIT;
                sp->rx_new = NEXT_RX(sp->rx_new);
-               DMA_SYNC_DESC_DEV(dev, rd);
+               dma_sync_desc_dev(dev, rd);
                rd = &sp->rx_desc[sp->rx_new];
-               DMA_SYNC_DESC_CPU(dev, rd);
+               dma_sync_desc_cpu(dev, rd);
        }
-       DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[orig_end]);
+       dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
        sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
-       DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[orig_end]);
-       DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+       dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
+       dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
        sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
-       DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+       dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
        rx_maybe_restart(sp, hregs, sregs);
 }
 
@@ -433,12 +436,12 @@ static inline void kick_tx(struct net_device *dev,
         * is not active!
         */
        td = &sp->tx_desc[i];
-       DMA_SYNC_DESC_CPU(dev, td);
+       dma_sync_desc_cpu(dev, td);
        while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
              (HPCDMA_XIU | HPCDMA_ETXD)) {
                i = NEXT_TX(i);
                td = &sp->tx_desc[i];
-               DMA_SYNC_DESC_CPU(dev, td);
+               dma_sync_desc_cpu(dev, td);
        }
        if (td->tdma.cntinfo & HPCDMA_XIU) {
                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
@@ -470,7 +473,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp
        for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
                td = &sp->tx_desc[j];
 
-               DMA_SYNC_DESC_CPU(dev, td);
+               dma_sync_desc_cpu(dev, td);
                if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
                        break;
                if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
@@ -488,7 +491,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp
                        dev_kfree_skb_any(td->skb);
                        td->skb = NULL;
                }
-               DMA_SYNC_DESC_DEV(dev, td);
+               dma_sync_desc_dev(dev, td);
        }
 }
 
@@ -591,14 +594,14 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
        len = skb->len;
        if (len < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN))
-                       return 0;
+                       return NETDEV_TX_OK;
                len = ETH_ZLEN;
        }
 
        dev->stats.tx_bytes += len;
        entry = sp->tx_new;
        td = &sp->tx_desc[entry];
-       DMA_SYNC_DESC_CPU(dev, td);
+       dma_sync_desc_cpu(dev, td);
 
        /* Create entry.  There are so many races with adding a new
         * descriptor to the chain:
@@ -618,14 +621,14 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                       len, DMA_TO_DEVICE);
        td->tdma.cntinfo = (len & HPCDMA_BCNT) |
                           HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
-       DMA_SYNC_DESC_DEV(dev, td);
+       dma_sync_desc_dev(dev, td);
        if (sp->tx_old != sp->tx_new) {
                struct sgiseeq_tx_desc *backend;
 
                backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
-               DMA_SYNC_DESC_CPU(dev, backend);
+               dma_sync_desc_cpu(dev, backend);
                backend->tdma.cntinfo &= ~HPCDMA_EOX;
-               DMA_SYNC_DESC_DEV(dev, backend);
+               dma_sync_desc_dev(dev, backend);
        }
        sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */
 
@@ -639,7 +642,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
                netif_stop_queue(dev);
        spin_unlock_irqrestore(&sp->tx_lock, flags);
 
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 static void timeout(struct net_device *dev)
@@ -653,7 +656,7 @@ static void timeout(struct net_device *dev)
 
 static void sgiseeq_set_multicast(struct net_device *dev)
 {
-       struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
+       struct sgiseeq_private *sp = netdev_priv(dev);
        unsigned char oldmode = sp->mode;
 
        if(dev->flags & IFF_PROMISC)
@@ -681,11 +684,11 @@ static inline void setup_tx_ring(struct net_device *dev,
        while (i < (nbufs - 1)) {
                buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                buf[i].tdma.pbuf = 0;
-               DMA_SYNC_DESC_DEV(dev, &buf[i]);
+               dma_sync_desc_dev(dev, &buf[i]);
                i++;
        }
        buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
-       DMA_SYNC_DESC_DEV(dev, &buf[i]);
+       dma_sync_desc_dev(dev, &buf[i]);
 }
 
 static inline void setup_rx_ring(struct net_device *dev,
@@ -698,15 +701,26 @@ static inline void setup_rx_ring(struct net_device *dev,
        while (i < (nbufs - 1)) {
                buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                buf[i].rdma.pbuf = 0;
-               DMA_SYNC_DESC_DEV(dev, &buf[i]);
+               dma_sync_desc_dev(dev, &buf[i]);
                i++;
        }
        buf[i].rdma.pbuf = 0;
        buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
-       DMA_SYNC_DESC_DEV(dev, &buf[i]);
+       dma_sync_desc_dev(dev, &buf[i]);
 }
 
-static int __init sgiseeq_probe(struct platform_device *pdev)
+static const struct net_device_ops sgiseeq_netdev_ops = {
+       .ndo_open               = sgiseeq_open,
+       .ndo_stop               = sgiseeq_close,
+       .ndo_start_xmit         = sgiseeq_start_xmit,
+       .ndo_tx_timeout         = timeout,
+       .ndo_set_multicast_list = sgiseeq_set_multicast,
+       .ndo_set_mac_address    = sgiseeq_set_mac_address,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+static int __devinit sgiseeq_probe(struct platform_device *pdev)
 {
        struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
        struct hpc3_regs *hpcregs = pd->hpc;
@@ -715,7 +729,6 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
        struct sgiseeq_private *sp;
        struct net_device *dev;
        int err;
-       DECLARE_MAC_BUF(mac);
 
        dev = alloc_etherdev(sizeof (struct sgiseeq_private));
        if (!dev) {
@@ -773,13 +786,8 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
                              SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
                              SEEQ_CTRL_ENCARR;
 
-       dev->open               = sgiseeq_open;
-       dev->stop               = sgiseeq_close;
-       dev->hard_start_xmit    = sgiseeq_start_xmit;
-       dev->tx_timeout         = timeout;
+       dev->netdev_ops         = &sgiseeq_netdev_ops;
        dev->watchdog_timeo     = (200 * HZ) / 1000;
-       dev->set_multicast_list = sgiseeq_set_multicast;
-       dev->set_mac_address    = sgiseeq_set_mac_address;
        dev->irq                = irq;
 
        if (register_netdev(dev)) {
@@ -789,8 +797,7 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
                goto err_out_free_page;
        }
 
-       printk(KERN_INFO "%s: %s %s\n",
-              dev->name, sgiseeqstr, print_mac(mac, dev->dev_addr));
+       printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
 
        return 0;
 
@@ -819,9 +826,10 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
 
 static struct platform_driver sgiseeq_driver = {
        .probe  = sgiseeq_probe,
-       .remove = __devexit_p(sgiseeq_remove),
+       .remove = __exit_p(sgiseeq_remove),
        .driver = {
-               .name   = "sgiseeq"
+               .name   = "sgiseeq",
+               .owner  = THIS_MODULE,
        }
 };
 
@@ -846,3 +854,4 @@ module_exit(sgiseeq_module_exit);
 MODULE_DESCRIPTION("SGI Seeq 8003 driver");
 MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sgiseeq");
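
For reference, a minimal sketch of the net_device_ops conversion pattern the patch above adopts, written against the 2.6.29-era API it targets. The "foo" driver, its function names, and the skeleton are hypothetical and illustrative only; what matters is collecting the former dev->open/dev->stop/dev->hard_start_xmit assignments into a const ops table hooked up through dev->netdev_ops, and returning NETDEV_TX_OK from the xmit path.

/*
 * Hypothetical "foo" Ethernet driver skeleton (not part of this patch),
 * showing the netdev_ops hookup used by the sgiseeq conversion above.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int foo_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* A real driver would hand the skb to hardware here. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;	/* not a bare 0, as in the patch above */
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_stop,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static struct net_device *foo_dev;

static int __init foo_init(void)
{
	int err;

	foo_dev = alloc_etherdev(0);
	if (!foo_dev)
		return -ENOMEM;

	foo_dev->netdev_ops = &foo_netdev_ops;	/* replaces per-field assignments */
	random_ether_addr(foo_dev->dev_addr);

	err = register_netdev(foo_dev);
	if (err)
		free_netdev(foo_dev);
	return err;
}

static void __exit foo_exit(void)
{
	unregister_netdev(foo_dev);
	free_netdev(foo_dev);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");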