diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index c3e6aa1..434f9d7 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -38,7 +38,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/in.h>
-#include <linux/slab.h>
+#include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/delay.h>
 #include <linux/init.h>
@@ -57,6 +57,7 @@
 #include <linux/bitops.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
+#include <linux/gfp.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -106,7 +107,7 @@ MODULE_LICENSE("GPL");
 #define GEM_MODULE_NAME        "gem"
 #define PFX GEM_MODULE_NAME ": "
 
-static struct pci_device_id gem_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 
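
The hunk above converts the PCI ID table to the DEFINE_PCI_DEVICE_TABLE() helper, which in kernels of this era expands to a const struct pci_device_id array. A minimal sketch of declaring and exporting such a table, using made-up vendor/device IDs and names:

#include <linux/module.h>
#include <linux/pci.h>

/* Placeholder IDs, for illustration only. */
#define FOO_VENDOR_ID	0x1234
#define FOO_DEVICE_ID	0x0001

static DEFINE_PCI_DEVICE_TABLE(foo_pci_tbl) = {
	{ PCI_DEVICE(FOO_VENDOR_ID, FOO_DEVICE_ID) },
	{ }	/* all-zero terminating entry */
};
MODULE_DEVICE_TABLE(pci, foo_pci_tbl);
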
@@ -781,7 +782,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
                        break;
 
                /* When writing back RX descriptor, GEM writes status
-                * then buffer address, possibly in seperate transactions.
+                * then buffer address, possibly in separate transactions.
                 * If we don't wait for the chip to write both, we could
                 * post a new buffer to this descriptor then have GEM spam
                 * on the buffer address.  We sync on the RX completion
@@ -1015,7 +1016,8 @@ static __inline__ int gem_intme(int entry)
        return 0;
 }
 
-static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
+                                 struct net_device *dev)
 {
        struct gem *gp = netdev_priv(dev);
        int entry;
@@ -1032,10 +1034,8 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        (csum_stuff_off << 21));
        }
 
-       local_irq_save(flags);
-       if (!spin_trylock(&gp->tx_lock)) {
+       if (!spin_trylock_irqsave(&gp->tx_lock, flags)) {
                /* Tell upper layer to requeue */
-               local_irq_restore(flags);
                return NETDEV_TX_LOCKED;
        }
        /* We raced with gem_do_stop() */
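
The hunk above folds the open-coded local_irq_save()/spin_trylock() pair into spin_trylock_irqsave(), which leaves the interrupt state untouched when the lock is not taken, so the failure path no longer needs local_irq_restore(). A sketch of that pattern in a hypothetical LLTX-style transmit handler (foo_xmit and struct foo_priv are made-up names):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {
	spinlock_t tx_lock;
};

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* On failure, the IRQ flags are not modified, so we can bail
	 * out directly and let the stack requeue the packet. */
	if (!spin_trylock_irqsave(&priv->tx_lock, flags))
		return NETDEV_TX_LOCKED;

	/* ... hand skb to the hardware ring here ... */

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}
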
@@ -1136,7 +1136,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
        writel(gp->tx_new, gp->regs + TXDMA_KICK);
        spin_unlock_irqrestore(&gp->tx_lock, flags);
 
-       dev->trans_start = jiffies;
+       dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
 
        return NETDEV_TX_OK;
 }
@@ -1229,7 +1229,7 @@ static void gem_reset(struct gem *gp)
                        break;
        } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
 
-       if (limit <= 0)
+       if (limit < 0)
                printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
 
        if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
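
The one-character change above tightens the timeout check after the reset poll. In the common countdown-poll shape sketched below (invented names, not the real gem_reset() body), the counter only goes negative when the loop actually gave up, so testing "< 0" distinguishes a timeout from success on the last allowed iteration:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>

static int foo_wait_reset(void __iomem *reg, u32 busy_bits)
{
	int limit = 100;
	u32 val;

	do {
		udelay(20);
		val = readl(reg);
		if (limit-- <= 0)
			break;		/* gave up; limit is now negative */
	} while (val & busy_bits);

	/* limit == 0 means the last allowed poll succeeded;
	 * only limit < 0 indicates a timeout. */
	if (limit < 0) {
		pr_err("reset timed out\n");
		return -ETIMEDOUT;
	}
	return 0;
}
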
@@ -1837,7 +1837,7 @@ static u32 gem_setup_multicast(struct gem *gp)
        int i;
 
        if ((gp->dev->flags & IFF_ALLMULTI) ||
-           (gp->dev->mc_count > 256)) {
+           (netdev_mc_count(gp->dev) > 256)) {
                for (i=0; i<16; i++)
                        writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
                rxcfg |= MAC_RXCFG_HFE;
@@ -1846,16 +1846,12 @@ static u32 gem_setup_multicast(struct gem *gp)
        } else {
                u16 hash_table[16];
                u32 crc;
-               struct dev_mc_list *dmi = gp->dev->mc_list;
+               struct netdev_hw_addr *ha;
                int i;
 
-               for (i = 0; i < 16; i++)
-                       hash_table[i] = 0;
-
-               for (i = 0; i < gp->dev->mc_count; i++) {
-                       char *addrs = dmi->dmi_addr;
-
-                       dmi = dmi->next;
+               memset(hash_table, 0, sizeof(hash_table));
+               netdev_for_each_mc_addr(ha, gp->dev) {
+                       char *addrs = ha->addr;
 
                        if (!(*addrs & 1))
                                continue;
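
Together with the netdev_mc_count() change in the previous hunk, the block above replaces the hand-rolled walk of dev->mc_list with the netdev_for_each_mc_addr() iterator over struct netdev_hw_addr. A sketch of the same iteration pattern used to build a 256-bit hash filter (foo_build_mc_hash is a made-up helper; the CRC-to-bit mapping is only illustrative):

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void foo_build_mc_hash(struct net_device *dev, u16 hash_table[16])
{
	struct netdev_hw_addr *ha;
	u32 crc;

	memset(hash_table, 0, 16 * sizeof(u16));

	/* Too many entries (or ALLMULTI): accept every multicast frame. */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 256) {
		memset(hash_table, 0xff, 16 * sizeof(u16));
		return;
	}

	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(ETH_ALEN, ha->addr);
		crc >>= 24;				/* keep top 8 bits */
		hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
	}
}
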
@@ -2061,7 +2057,15 @@ static int gem_check_invariants(struct gem *gp)
                mif_cfg &= ~MIF_CFG_PSELECT;
                writel(mif_cfg, gp->regs + MIF_CFG);
        } else {
-               gp->phy_type = phy_serialink;
+#ifdef CONFIG_SPARC
+               const char *p;
+
+               p = of_get_property(gp->of_node, "shared-pins", NULL);
+               if (p && !strcmp(p, "serdes"))
+                       gp->phy_type = phy_serdes;
+               else
+#endif
+                       gp->phy_type = phy_serialink;
        }
        if (gp->phy_type == phy_mii_mdio1 ||
            gp->phy_type == phy_mii_mdio0) {
@@ -2851,9 +2855,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                break;
 
        case SIOCSMIIREG:               /* Write MII PHY register. */
-               if (!capable(CAP_NET_ADMIN))
-                       rc = -EPERM;
-               else if (!gp->running)
+               if (!gp->running)
                        rc = -EAGAIN;
                else {
                        __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
@@ -2921,7 +2923,6 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
        dev_addr[1] = 0x00;
        dev_addr[2] = 0x20;
        get_random_bytes(dev_addr + 3, 3);
-       return;
 }
 #endif /* not Sparc and not PPC */
 
@@ -2998,8 +2999,11 @@ static const struct net_device_ops gem_netdev_ops = {
        .ndo_do_ioctl           = gem_ioctl,
        .ndo_tx_timeout         = gem_tx_timeout,
        .ndo_change_mtu         = gem_change_mtu,
-       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = gem_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = gem_poll_controller,
+#endif
 };
 
 static int __devinit gem_init_one(struct pci_dev *pdev,
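
The hunk above finishes moving the driver's callbacks into struct net_device_ops: the MAC-address hook becomes gem_set_mac_address and the netpoll hook moves in as .ndo_poll_controller, while the legacy dev->set_mac_address / dev->poll_controller assignments are dropped further down. A sketch of wiring up such an ops table in a hypothetical driver (all foo_* names are placeholders):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* placeholder: drop the packet */
	return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void foo_poll_controller(struct net_device *dev)
{
	/* a real driver disables its IRQ and runs the interrupt handler */
}
#endif

static const struct net_device_ops foo_netdev_ops = {
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,	/* or a driver-specific hook */
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= foo_poll_controller,
#endif
};

/* In the probe path: dev->netdev_ops = &foo_netdev_ops; */
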
@@ -3039,10 +3043,10 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
         */
        if (pdev->vendor == PCI_VENDOR_ID_SUN &&
            pdev->device == PCI_DEVICE_ID_SUN_GEM &&
-           !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+           !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        printk(KERN_ERR PFX "No usable DMA configuration, "
                               "aborting.\n");
@@ -3161,10 +3165,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
        dev->watchdog_timeo = 5 * HZ;
        dev->irq = pdev->irq;
        dev->dma = 0;
-       dev->set_mac_address = gem_set_mac_address;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       dev->poll_controller = gem_poll_controller;
-#endif
 
        /* Set that now, in case PM kicks in now */
        pci_set_drvdata(pdev, dev);