cassini: convert firmware loading to request_firmware(), MODULE_PARM to module_param(), and clean up trailing whitespace
[safe/jmp/linux-2.6] / drivers / net / cassini.c
index 45831fb..6d76236 100644 (file)
@@ -43,7 +43,7 @@
  *  -- on page reclamation, the driver swaps the page with a spare page.
  *     if that page is still in use, it frees its reference to that page,
  *     and allocates a new page for use. otherwise, it just recycles the
- *     the page. 
+ *     the page.
  *
  * NOTE: cassini can parse the header. however, it's not worth it
  *       as long as the network stack requires a header copy.
  * interrupts, but the INT# assignment needs to be set up properly by
  * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
  * that. also, the two descriptor rings are designed to distinguish between
- * encrypted and non-encrypted packets, but we use them for buffering 
+ * encrypted and non-encrypted packets, but we use them for buffering
  * instead.
  *
- * by default, the selective clear mask is set up to process rx packets.  
+ * by default, the selective clear mask is set up to process rx packets.
  */
 
-#include <linux/config.h>
-#include <linux/version.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -76,6 +75,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/vmalloc.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
 #include <linux/mm.h>
@@ -92,6 +92,8 @@
 #include <linux/mii.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
 
 #include <net/checksum.h>
 
 #define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
 #define CAS_NCPUS            num_online_cpus()
 
-#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
+#ifdef CONFIG_CASSINI_NAPI
 #define USE_NAPI
 #define cas_skb_release(x)  netif_receive_skb(x)
 #else
 #endif
 
 /* select which firmware to use */
-#define USE_HP_WORKAROUND     
+#define USE_HP_WORKAROUND
 #define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
 #define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */
 
 #undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
 
 #define DRV_MODULE_NAME                "cassini"
-#define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "1.4"
-#define DRV_MODULE_RELDATE     "1 July 2004"
+#define DRV_MODULE_VERSION     "1.6"
+#define DRV_MODULE_RELDATE     "21 May 2008"
 
 #define CAS_DEF_MSG_ENABLE       \
        (NETIF_MSG_DRV          | \
 #define STOP_TRIES_PHY 1000
 #define STOP_TRIES     5000
 
-/* specify a minimum frame size to deal with some fifo issues 
+/* specify a minimum frame size to deal with some fifo issues
  * max mtu == 2 * page size - ethernet header - 64 - swivel =
  *            2 * page_size - 0x50
  */
 static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
+static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
+static int link_mode;
+
 MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
 MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
 MODULE_LICENSE("GPL");
-MODULE_PARM(cassini_debug, "i");
+MODULE_FIRMWARE("sun/cassini.bin");
+module_param(cassini_debug, int, 0);
 MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
-MODULE_PARM(link_mode, "i");
+module_param(link_mode, int, 0);
 MODULE_PARM_DESC(link_mode, "default link mode");
 
 /*
@@ -205,11 +210,11 @@ MODULE_PARM_DESC(link_mode, "default link mode");
  * being confused and never showing a link status of "up."
  */
 #define DEFAULT_LINKDOWN_TIMEOUT 5
-/* 
+/*
  * Value in seconds, for user input.
  */
 static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
-MODULE_PARM(linkdown_timeout, "i");
+module_param(linkdown_timeout, int, 0);
 MODULE_PARM_DESC(linkdown_timeout,
 "min reset interval in sec. for PCS linkdown issue; disabled if not positive");
 
@@ -221,8 +226,6 @@ MODULE_PARM_DESC(linkdown_timeout,
 static int link_transition_timeout;
 
 
-static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
-static int link_mode;
 
 static u16 link_modes[] __devinitdata = {
        BMCR_ANENABLE,                   /* 0 : autoneg */
@@ -233,7 +236,7 @@ static u16 link_modes[] __devinitdata = {
        CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
 };
 
-static struct pci_device_id cas_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
@@ -249,7 +252,7 @@ static inline void cas_lock_tx(struct cas *cp)
 {
        int i;
 
-       for (i = 0; i < N_TX_RINGS; i++)  
+       for (i = 0; i < N_TX_RINGS; i++)
                spin_lock(&cp->tx_lock[i]);
 }
 
@@ -278,8 +281,8 @@ static inline void cas_unlock_tx(struct cas *cp)
 {
        int i;
 
-       for (i = N_TX_RINGS; i > 0; i--)  
-               spin_unlock(&cp->tx_lock[i - 1]);  
+       for (i = N_TX_RINGS; i > 0; i--)
+               spin_unlock(&cp->tx_lock[i - 1]);
 }
 
 static inline void cas_unlock_all(struct cas *cp)
@@ -316,7 +319,7 @@ static void cas_disable_irq(struct cas *cp, const int ring)
 #ifdef USE_PCI_INTD
                case 3:
 #endif
-                       writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN, 
+                       writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
                               cp->regs + REG_PLUS_INTRN_MASK(ring));
                        break;
 #endif
@@ -391,7 +394,7 @@ static inline void cas_entropy_reset(struct cas *cp)
        if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
                return;
 
-       writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT, 
+       writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
               cp->regs + REG_BIM_LOCAL_DEV_EN);
        writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
        writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
@@ -402,7 +405,7 @@ static inline void cas_entropy_reset(struct cas *cp)
 #endif
 }
 
-/* access to the phy. the following assumes that we've initialized the MIF to 
+/* access to the phy. the following assumes that we've initialized the MIF to
  * be in frame rather than bit-bang mode
  */
 static u16 cas_phy_read(struct cas *cp, int reg)
@@ -415,7 +418,7 @@ static u16 cas_phy_read(struct cas *cp, int reg)
        cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
        cmd |= MIF_FRAME_TURN_AROUND_MSB;
        writel(cmd, cp->regs + REG_MIF_FRAME);
-       
+
        /* poll for completion */
        while (limit-- > 0) {
                udelay(10);
@@ -437,7 +440,7 @@ static int cas_phy_write(struct cas *cp, int reg, u16 val)
        cmd |= MIF_FRAME_TURN_AROUND_MSB;
        cmd |= val & MIF_FRAME_DATA_MASK;
        writel(cmd, cp->regs + REG_MIF_FRAME);
-       
+
        /* poll for completion */
        while (limit-- > 0) {
                udelay(10);
@@ -450,7 +453,7 @@ static int cas_phy_write(struct cas *cp, int reg, u16 val)
 
 static void cas_phy_powerup(struct cas *cp)
 {
-       u16 ctl = cas_phy_read(cp, MII_BMCR);   
+       u16 ctl = cas_phy_read(cp, MII_BMCR);
 
        if ((ctl & BMCR_PDOWN) == 0)
                return;
@@ -460,7 +463,7 @@ static void cas_phy_powerup(struct cas *cp)
 
 static void cas_phy_powerdown(struct cas *cp)
 {
-       u16 ctl = cas_phy_read(cp, MII_BMCR);   
+       u16 ctl = cas_phy_read(cp, MII_BMCR);
 
        if (ctl & BMCR_PDOWN)
                return;
@@ -471,7 +474,7 @@ static void cas_phy_powerdown(struct cas *cp)
 /* cp->lock held. note: the last put_page will free the buffer */
 static int cas_page_free(struct cas *cp, cas_page_t *page)
 {
-       pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, 
+       pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
                       PCI_DMA_FROMDEVICE);
        __free_pages(page->buffer, cp->page_order);
        kfree(page);
@@ -482,14 +485,14 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
 #define RX_USED_ADD(x, y)       ((x)->used += (y))
 #define RX_USED_SET(x, y)       ((x)->used  = (y))
 #else
-#define RX_USED_ADD(x, y) 
+#define RX_USED_ADD(x, y)
 #define RX_USED_SET(x, y)
 #endif
 
 /* local page allocation routines for the receive buffers. jumbo pages
  * require at least 8K contiguous and 8K aligned buffers.
  */
-static cas_page_t *cas_page_alloc(struct cas *cp, const int flags)
+static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 {
        cas_page_t *page;
 
@@ -532,8 +535,7 @@ static void cas_spare_free(struct cas *cp)
        /* free spare buffers */
        INIT_LIST_HEAD(&list);
        spin_lock(&cp->rx_spare_lock);
-       list_splice(&cp->rx_spare_list, &list);
-       INIT_LIST_HEAD(&cp->rx_spare_list);
+       list_splice_init(&cp->rx_spare_list, &list);
        spin_unlock(&cp->rx_spare_lock);
        list_for_each_safe(elem, tmp, &list) {
                cas_page_free(cp, list_entry(elem, cas_page_t, list));
@@ -546,13 +548,11 @@ static void cas_spare_free(struct cas *cp)
         * lock than used everywhere else to manipulate this list.
         */
        spin_lock(&cp->rx_inuse_lock);
-       list_splice(&cp->rx_inuse_list, &list);
-       INIT_LIST_HEAD(&cp->rx_inuse_list);
+       list_splice_init(&cp->rx_inuse_list, &list);
        spin_unlock(&cp->rx_inuse_lock);
 #else
        spin_lock(&cp->rx_spare_lock);
-       list_splice(&cp->rx_inuse_list, &list);
-       INIT_LIST_HEAD(&cp->rx_inuse_list);
+       list_splice_init(&cp->rx_inuse_list, &list);
        spin_unlock(&cp->rx_spare_lock);
 #endif
        list_for_each_safe(elem, tmp, &list) {
@@ -561,7 +561,7 @@ static void cas_spare_free(struct cas *cp)
 }
 
 /* replenish spares if needed */
-static void cas_spare_recover(struct cas *cp, const int flags)
+static void cas_spare_recover(struct cas *cp, const gfp_t flags)
 {
        struct list_head list, *elem, *tmp;
        int needed, i;
@@ -573,14 +573,25 @@ static void cas_spare_recover(struct cas *cp, const int flags)
        /* make a local copy of the list */
        INIT_LIST_HEAD(&list);
        spin_lock(&cp->rx_inuse_lock);
-       list_splice(&cp->rx_inuse_list, &list);
-       INIT_LIST_HEAD(&cp->rx_inuse_list);
+       list_splice_init(&cp->rx_inuse_list, &list);
        spin_unlock(&cp->rx_inuse_lock);
-       
+
        list_for_each_safe(elem, tmp, &list) {
                cas_page_t *page = list_entry(elem, cas_page_t, list);
 
-               if (page_count(page->buffer) > 1) 
+               /*
+                * With the lockless pagecache, cassini buffering scheme gets
+                * slightly less accurate: we might find that a page has an
+                * elevated reference count here, due to a speculative ref,
+                * and skip it as in-use. Ideally we would be able to reclaim
+                * it. However this would be such a rare case, it doesn't
+                * matter too much as we should pick it up the next time round.
+                *
+                * Importantly, if we find that the page has a refcount of 1
+                * here (our refcount), then we know it is definitely not in use
+                * so we can reuse it.
+                */
+               if (page_count(page->buffer) > 1)
                        continue;
 
                list_del(elem);
@@ -601,7 +612,7 @@ static void cas_spare_recover(struct cas *cp, const int flags)
                list_splice(&list, &cp->rx_inuse_list);
                spin_unlock(&cp->rx_inuse_lock);
        }
-       
+
        spin_lock(&cp->rx_spare_lock);
        needed = cp->rx_spares_needed;
        spin_unlock(&cp->rx_spare_lock);
@@ -613,7 +624,7 @@ static void cas_spare_recover(struct cas *cp, const int flags)
        i = 0;
        while (i < needed) {
                cas_page_t *spare = cas_page_alloc(cp, flags);
-               if (!spare) 
+               if (!spare)
                        break;
                list_add(&spare->list, &list);
                i++;
@@ -638,9 +649,8 @@ static cas_page_t *cas_page_dequeue(struct cas *cp)
                cas_spare_recover(cp, GFP_ATOMIC);
                spin_lock(&cp->rx_spare_lock);
                if (list_empty(&cp->rx_spare_list)) {
-                       if (netif_msg_rx_err(cp))
-                               printk(KERN_ERR "%s: no spare buffers "
-                                      "available.\n", cp->dev->name);
+                       netif_err(cp, rx_err, cp->dev,
+                                 "no spare buffers available\n");
                        spin_unlock(&cp->rx_spare_lock);
                        return NULL;
                }
@@ -669,12 +679,12 @@ static cas_page_t *cas_page_dequeue(struct cas *cp)
 static void cas_mif_poll(struct cas *cp, const int enable)
 {
        u32 cfg;
-       
-       cfg  = readl(cp->regs + REG_MIF_CFG); 
+
+       cfg  = readl(cp->regs + REG_MIF_CFG);
        cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
 
        if (cp->phy_type & CAS_PHY_MII_MDIO1)
-               cfg |= MIF_CFG_PHY_SELECT; 
+               cfg |= MIF_CFG_PHY_SELECT;
 
        /* poll and interrupt on link status change. */
        if (enable) {
@@ -682,8 +692,8 @@ static void cas_mif_poll(struct cas *cp, const int enable)
                cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
                cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
        }
-       writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF, 
-              cp->regs + REG_MIF_MASK); 
+       writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
+              cp->regs + REG_MIF_MASK);
        writel(cfg, cp->regs + REG_MIF_CFG);
 }
 
@@ -717,12 +727,10 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
 #endif
 start_aneg:
        if (cp->lstate == link_up) {
-               printk(KERN_INFO "%s: PCS link down.\n",
-                      cp->dev->name);
+               netdev_info(cp->dev, "PCS link down\n");
        } else {
                if (changed) {
-                       printk(KERN_INFO "%s: link configuration changed\n",
-                              cp->dev->name);
+                       netdev_info(cp->dev, "link configuration changed\n");
                }
        }
        cp->lstate = link_down;
@@ -733,7 +741,7 @@ start_aneg:
        /*
         * WTZ: If the old state was link_up, we turn off the carrier
         * to replicate everything we do elsewhere on a link-down
-        * event when we were already in a link-up state..  
+        * event when we were already in a link-up state..
         */
        if (oldstate == link_up)
                netif_carrier_off(cp->dev);
@@ -741,7 +749,7 @@ start_aneg:
                /*
                 * WTZ: This branch will simply schedule a full reset after
                 * we explicitly changed link modes in an ioctl. See if this
-                * fixes the link-problems we were having for forced mode. 
+                * fixes the link-problems we were having for forced mode.
                 */
                atomic_inc(&cp->reset_task_pending);
                atomic_inc(&cp->reset_task_pending_all);
@@ -769,7 +777,7 @@ start_aneg:
        } else {
                cas_mif_poll(cp, 0);
                ctl = cas_phy_read(cp, MII_BMCR);
-               ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | 
+               ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
                         CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
                ctl |= cp->link_cntl;
                if (ctl & BMCR_ANENABLE) {
@@ -792,10 +800,10 @@ static int cas_reset_mii_phy(struct cas *cp)
 {
        int limit = STOP_TRIES_PHY;
        u16 val;
-       
+
        cas_phy_write(cp, MII_BMCR, BMCR_RESET);
        udelay(100);
-       while (limit--) {
+       while (--limit) {
                val = cas_phy_read(cp, MII_BMCR);
                if ((val & BMCR_RESET) == 0)
                        break;
@@ -804,9 +812,44 @@ static int cas_reset_mii_phy(struct cas *cp)
        return (limit <= 0);
 }
 
+static int cas_saturn_firmware_init(struct cas *cp)
+{
+       const struct firmware *fw;
+       const char fw_name[] = "sun/cassini.bin";
+       int err;
+
+       if (PHY_NS_DP83065 != cp->phy_id)
+               return 0;
+
+       err = request_firmware(&fw, fw_name, &cp->pdev->dev);
+       if (err) {
+               pr_err("Failed to load firmware \"%s\"\n",
+                      fw_name);
+               return err;
+       }
+       if (fw->size < 2) {
+               pr_err("bogus length %zu in \"%s\"\n",
+                      fw->size, fw_name);
+               err = -EINVAL;
+               goto out;
+       }
+       cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
+       cp->fw_size = fw->size - 2;
+       cp->fw_data = vmalloc(cp->fw_size);
+       if (!cp->fw_data) {
+               err = -ENOMEM;
+               pr_err("\"%s\" Failed %d\n", fw_name, err);
+               goto out;
+       }
+       memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
+out:
+       release_firmware(fw);
+       return err;
+}
+
 static void cas_saturn_firmware_load(struct cas *cp)
 {
-       cas_saturn_patch_t *patch = cas_saturn_patch;
+       int i;
 
        cas_phy_powerdown(cp);
 
@@ -825,11 +868,9 @@ static void cas_saturn_firmware_load(struct cas *cp)
 
        /* download new firmware */
        cas_phy_write(cp, DP83065_MII_MEM, 0x1);
-       cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
-       while (patch->addr) {
-               cas_phy_write(cp, DP83065_MII_REGD, patch->val);
-               patch++;
-       }
+       cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
+       for (i = 0; i < cp->fw_size; i++)
+               cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
 
        /* enable firmware */
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
@@ -875,17 +916,17 @@ static void cas_phy_init(struct cas *cp)
                        val = cas_phy_read(cp, BROADCOM_MII_REG4);
                        if (val & 0x0080) {
                                /* link workaround */
-                               cas_phy_write(cp, BROADCOM_MII_REG4, 
+                               cas_phy_write(cp, BROADCOM_MII_REG4,
                                              val & ~0x0080);
                        }
-                       
+
                } else if (cp->cas_flags & CAS_FLAG_SATURN) {
-                       writel((cp->phy_type & CAS_PHY_MII_MDIO0) ? 
-                              SATURN_PCFG_FSI : 0x0, 
+                       writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
+                              SATURN_PCFG_FSI : 0x0,
                               cp->regs + REG_SATURN_PCFG);
 
                        /* load firmware to address 10Mbps auto-negotiation
-                        * issue. NOTE: this will need to be changed if the 
+                        * issue. NOTE: this will need to be changed if the
                         * default firmware gets fixed.
                         */
                        if (PHY_NS_DP83065 == cp->phy_id) {
@@ -904,9 +945,9 @@ static void cas_phy_init(struct cas *cp)
                              cas_phy_read(cp, MII_ADVERTISE) |
                              (ADVERTISE_10HALF | ADVERTISE_10FULL |
                               ADVERTISE_100HALF | ADVERTISE_100FULL |
-                              CAS_ADVERTISE_PAUSE | 
+                              CAS_ADVERTISE_PAUSE |
                               CAS_ADVERTISE_ASYM_PAUSE));
-               
+
                if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
                        /* make sure that we don't advertise half
                         * duplex to avoid a chip issue
@@ -935,16 +976,15 @@ static void cas_phy_init(struct cas *cp)
                writel(val, cp->regs + REG_PCS_MII_CTRL);
 
                limit = STOP_TRIES;
-               while (limit-- > 0) {
+               while (--limit > 0) {
                        udelay(10);
-                       if ((readl(cp->regs + REG_PCS_MII_CTRL) & 
+                       if ((readl(cp->regs + REG_PCS_MII_CTRL) &
                             PCS_MII_RESET) == 0)
                                break;
                }
                if (limit <= 0)
-                       printk(KERN_WARNING "%s: PCS reset bit would not "
-                              "clear [%08x].\n", cp->dev->name,
-                              readl(cp->regs + REG_PCS_STATE_MACHINE));
+                       netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
+                                   readl(cp->regs + REG_PCS_STATE_MACHINE));
 
                /* Make sure PCS is disabled while changing advertisement
                 * configuration.
@@ -954,7 +994,7 @@ static void cas_phy_init(struct cas *cp)
                /* Advertise all capabilities except half-duplex. */
                val  = readl(cp->regs + REG_PCS_MII_ADVERT);
                val &= ~PCS_MII_ADVERT_HD;
-               val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE | 
+               val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
                        PCS_MII_ADVERT_ASYM_PAUSE);
                writel(val, cp->regs + REG_PCS_MII_ADVERT);
 
@@ -986,11 +1026,8 @@ static int cas_pcs_link_check(struct cas *cp)
         */
        if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
                     PCS_MII_STATUS_REMOTE_FAULT)) ==
-           (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
-               if (netif_msg_link(cp))
-                       printk(KERN_INFO "%s: PCS RemoteFault\n", 
-                              cp->dev->name);
-       }
+           (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
+               netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
 
        /* work around link detection issue by querying the PCS state
         * machine directly.
@@ -1007,7 +1044,7 @@ static int cas_pcs_link_check(struct cas *cp)
                        if (cp->opened) {
                                cp->lstate = link_up;
                                cp->link_transition = LINK_TRANSITION_LINK_UP;
-                               
+
                                cas_set_link_modes(cp);
                                netif_carrier_on(cp->dev);
                        }
@@ -1018,8 +1055,8 @@ static int cas_pcs_link_check(struct cas *cp)
                    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
                    !cp->link_transition_jiffies_valid) {
                        /*
-                        * force a reset, as a workaround for the 
-                        * link-failure problem. May want to move this to a 
+                        * force a reset, as a workaround for the
+                        * link-failure problem. May want to move this to a
                         * point a bit earlier in the sequence. If we had
                         * generated a reset a short time ago, we'll wait for
                         * the link timer to check the status until a
@@ -1037,10 +1074,8 @@ static int cas_pcs_link_check(struct cas *cp)
                        cp->link_transition = LINK_TRANSITION_ON_FAILURE;
                }
                netif_carrier_off(cp->dev);
-               if (cp->opened && netif_msg_link(cp)) {
-                       printk(KERN_INFO "%s: PCS link down.\n",
-                              cp->dev->name);
-               }
+               if (cp->opened)
+                       netif_info(cp, link, cp->dev, "PCS link down\n");
 
                /* Cassini only: if you force a mode, there can be
                 * sync problems on link down. to fix that, the following
@@ -1077,17 +1112,17 @@ static int cas_pcs_link_check(struct cas *cp)
        return retval;
 }
 
-static int cas_pcs_interrupt(struct net_device *dev, 
+static int cas_pcs_interrupt(struct net_device *dev,
                             struct cas *cp, u32 status)
 {
        u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
 
-       if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0) 
+       if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
                return 0;
        return cas_pcs_link_check(cp);
 }
 
-static int cas_txmac_interrupt(struct net_device *dev, 
+static int cas_txmac_interrupt(struct net_device *dev,
                               struct cas *cp, u32 status)
 {
        u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
@@ -1095,9 +1130,8 @@ static int cas_txmac_interrupt(struct net_device *dev,
        if (!txmac_stat)
                return 0;
 
-       if (netif_msg_intr(cp))
-               printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
-                       cp->dev->name, txmac_stat);
+       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+                    "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
 
        /* Defer timer expiration is quite normal,
         * don't even log the event.
@@ -1108,14 +1142,12 @@ static int cas_txmac_interrupt(struct net_device *dev,
 
        spin_lock(&cp->stat_lock[0]);
        if (txmac_stat & MAC_TX_UNDERRUN) {
-               printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
-                      dev->name);
+               netdev_err(dev, "TX MAC xmit underrun\n");
                cp->net_stats[0].tx_fifo_errors++;
        }
 
        if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
-               printk(KERN_ERR "%s: TX MAC max packet size error.\n",
-                      dev->name);
+               netdev_err(dev, "TX MAC max packet size error\n");
                cp->net_stats[0].tx_errors++;
        }
 
@@ -1142,7 +1174,7 @@ static int cas_txmac_interrupt(struct net_device *dev,
        return 0;
 }
 
-static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware) 
+static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
 {
        cas_hp_inst_t *inst;
        u32 val;
@@ -1177,12 +1209,12 @@ static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
 
 static void cas_init_rx_dma(struct cas *cp)
 {
-       u64 desc_dma = cp->block_dvma; 
+       u64 desc_dma = cp->block_dvma;
        u32 val;
        int i, size;
 
        /* rx free descriptors */
-       val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL); 
+       val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
        val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
        val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
        if ((N_RX_DESC_RINGS > 1) &&
@@ -1190,27 +1222,27 @@ static void cas_init_rx_dma(struct cas *cp)
                val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
        writel(val, cp->regs + REG_RX_CFG);
 
-       val = (unsigned long) cp->init_rxds[0] - 
+       val = (unsigned long) cp->init_rxds[0] -
                (unsigned long) cp->init_block;
        writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
        writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
        writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
 
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
-               /* rx desc 2 is for IPSEC packets. however, 
+               /* rx desc 2 is for IPSEC packets. however,
                 * we don't it that for that purpose.
                 */
-               val = (unsigned long) cp->init_rxds[1] - 
+               val = (unsigned long) cp->init_rxds[1] -
                        (unsigned long) cp->init_block;
                writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
-               writel((desc_dma + val) & 0xffffffff, cp->regs + 
+               writel((desc_dma + val) & 0xffffffff, cp->regs +
                       REG_PLUS_RX_DB1_LOW);
-               writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + 
+               writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
                       REG_PLUS_RX_KICK1);
        }
-       
+
        /* rx completion registers */
-       val = (unsigned long) cp->init_rxcs[0] - 
+       val = (unsigned long) cp->init_rxcs[0] -
                (unsigned long) cp->init_block;
        writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
        writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
@@ -1218,11 +1250,11 @@ static void cas_init_rx_dma(struct cas *cp)
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                /* rx comp 2-4 */
                for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
-                       val = (unsigned long) cp->init_rxcs[i] - 
+                       val = (unsigned long) cp->init_rxcs[i] -
                                (unsigned long) cp->init_block;
-                       writel((desc_dma + val) >> 32, cp->regs + 
+                       writel((desc_dma + val) >> 32, cp->regs +
                               REG_PLUS_RX_CBN_HI(i));
-                       writel((desc_dma + val) & 0xffffffff, cp->regs + 
+                       writel((desc_dma + val) & 0xffffffff, cp->regs +
                               REG_PLUS_RX_CBN_LOW(i));
                }
        }
@@ -1239,21 +1271,21 @@ static void cas_init_rx_dma(struct cas *cp)
 
                /* 2 is different from 3 and 4 */
                if (N_RX_COMP_RINGS > 1)
-                       writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1, 
+                       writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
                               cp->regs + REG_PLUS_ALIASN_CLEAR(1));
 
-               for (i = 2; i < N_RX_COMP_RINGS; i++) 
-                       writel(INTR_RX_DONE_ALT, 
+               for (i = 2; i < N_RX_COMP_RINGS; i++)
+                       writel(INTR_RX_DONE_ALT,
                               cp->regs + REG_PLUS_ALIASN_CLEAR(i));
        }
 
        /* set up pause thresholds */
        val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
                        cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
-       val |= CAS_BASE(RX_PAUSE_THRESH_ON, 
+       val |= CAS_BASE(RX_PAUSE_THRESH_ON,
                        cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
        writel(val, cp->regs + REG_RX_PAUSE_THRESH);
-       
+
        /* zero out dma reassembly buffers */
        for (i = 0; i < 64; i++) {
                writel(i, cp->regs + REG_RX_TABLE_ADDR);
@@ -1292,7 +1324,7 @@ static void cas_init_rx_dma(struct cas *cp)
         * this should be tunable.
         */
        writel(0x0, cp->regs + REG_RX_RED);
-       
+
        /* receive page sizes. default == 2K (0x800) */
        val = 0;
        if (cp->page_size == 0x1000)
@@ -1301,7 +1333,7 @@ static void cas_init_rx_dma(struct cas *cp)
                val = 0x2;
        else if (cp->page_size == 0x4000)
                val = 0x3;
-       
+
        /* round mtu + offset. constrain to page size. */
        size = cp->dev->mtu + 64;
        if (size > cp->page_size)
@@ -1318,11 +1350,11 @@ static void cas_init_rx_dma(struct cas *cp)
 
        cp->mtu_stride = 1 << (i + 10);
        val  = CAS_BASE(RX_PAGE_SIZE, val);
-       val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i); 
+       val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
        writel(val, cp->regs + REG_RX_PAGE_SIZE);
-       
+
        /* enable the header parser if desired */
        if (CAS_HP_FIRMWARE == cas_prog_null)
                return;
@@ -1336,7 +1368,7 @@ static void cas_init_rx_dma(struct cas *cp)
 static inline void cas_rxc_init(struct cas_rx_comp *rxc)
 {
        memset(rxc, 0, sizeof(*rxc));
-       rxc->word4 = cpu_to_le64(RX_COMP4_ZERO); 
+       rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
 }
 
 /* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
@@ -1359,9 +1391,9 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
        }
        return new;
 }
-                                  
+
 /* this needs to be changed if we actually use the ENC RX DESC ring */
-static cas_page_t *cas_page_swap(struct cas *cp, const int ring, 
+static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
                                 const int index)
 {
        cas_page_t **page0 = cp->rx_pages[0];
@@ -1374,7 +1406,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
                        page1[index] = page0[index];
                        page0[index] = new;
                }
-       } 
+       }
        RX_USED_SET(page0[index], 0);
        return page0[index];
 }
@@ -1398,11 +1430,11 @@ static void cas_clean_rxds(struct cas *cp)
        for (i = 0; i < size; i++) {
                cas_page_t *page = cas_page_swap(cp, 0, i);
                rxd[i].buffer = cpu_to_le64(page->dma_addr);
-               rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) | 
+               rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
                                            CAS_BASE(RX_INDEX_RING, 0));
        }
 
-       cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4; 
+       cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
        cp->rx_last[0] = 0;
        cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
 }
@@ -1443,8 +1475,7 @@ static int cas_rxmac_reset(struct cas *cp)
                udelay(10);
        }
        if (limit == STOP_TRIES) {
-               printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
-                      "chip.\n", dev->name);
+               netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
                return 1;
        }
 
@@ -1456,8 +1487,7 @@ static int cas_rxmac_reset(struct cas *cp)
                udelay(10);
        }
        if (limit == STOP_TRIES) {
-               printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
-                      "chip.\n", dev->name);
+               netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
                return 1;
        }
 
@@ -1471,8 +1501,7 @@ static int cas_rxmac_reset(struct cas *cp)
                udelay(10);
        }
        if (limit == STOP_TRIES) {
-               printk(KERN_ERR "%s: RX reset command will not execute, "
-                      "resetting whole chip.\n", dev->name);
+               netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
                return 1;
        }
 
@@ -1501,13 +1530,11 @@ static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
        if (!stat)
                return 0;
 
-       if (netif_msg_intr(cp))
-               printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
-                       cp->dev->name, stat);
+       netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
 
        /* these are all rollovers */
        spin_lock(&cp->stat_lock[0]);
-       if (stat & MAC_RX_ALIGN_ERR) 
+       if (stat & MAC_RX_ALIGN_ERR)
                cp->net_stats[0].rx_frame_errors += 0x10000;
 
        if (stat & MAC_RX_CRC_ERR)
@@ -1536,9 +1563,8 @@ static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
        if (!stat)
                return 0;
 
-       if (netif_msg_intr(cp))
-               printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
-                       cp->dev->name, stat);
+       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+                    "mac interrupt, stat: 0x%x\n", stat);
 
        /* This interrupt is just for pause frame and pause
         * tracking.  It is useful for diagnostics and debug
@@ -1553,23 +1579,21 @@ static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
        return 0;
 }
 
-       
+
 /* Must be invoked under cp->lock. */
 static inline int cas_mdio_link_not_up(struct cas *cp)
 {
        u16 val;
-       
+
        switch (cp->lstate) {
        case link_force_ret:
-               if (netif_msg_link(cp))
-                       printk(KERN_INFO "%s: Autoneg failed again, keeping"
-                               " forced mode\n", cp->dev->name);
+               netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
                cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
                cp->timer_ticks = 5;
                cp->lstate = link_force_ok;
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                break;
-               
+
        case link_aneg:
                val = cas_phy_read(cp, MII_BMCR);
 
@@ -1578,7 +1602,7 @@ static inline int cas_mdio_link_not_up(struct cas *cp)
                 */
                val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
                val |= BMCR_FULLDPLX;
-               val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? 
+               val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
                        CAS_BMCR_SPEED1000 : BMCR_SPEED100;
                cas_phy_write(cp, MII_BMCR, val);
                cp->timer_ticks = 5;
@@ -1620,20 +1644,20 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
 
        if (bmsr & BMSR_LSTATUS) {
                /* Ok, here we got a link. If we had it due to a forced
-                * fallback, and we were configured for autoneg, we 
+                * fallback, and we were configured for autoneg, we
                 * retry a short autoneg pass. If you know your hub is
                 * broken, use ethtool ;)
                 */
-               if ((cp->lstate == link_force_try) && 
+               if ((cp->lstate == link_force_try) &&
                    (cp->link_cntl & BMCR_ANENABLE)) {
                        cp->lstate = link_force_ret;
                        cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                        cas_mif_poll(cp, 0);
                        cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
                        cp->timer_ticks = 5;
-                       if (cp->opened && netif_msg_link(cp))
-                               printk(KERN_INFO "%s: Got link after fallback, retrying"
-                                      " autoneg once...\n", cp->dev->name);
+                       if (cp->opened)
+                               netif_info(cp, link, cp->dev,
+                                          "Got link after fallback, retrying autoneg once...\n");
                        cas_phy_write(cp, MII_BMCR,
                                      cp->link_fcntl | BMCR_ANENABLE |
                                      BMCR_ANRESTART);
@@ -1660,14 +1684,13 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
                cp->link_transition = LINK_TRANSITION_LINK_DOWN;
 
                netif_carrier_off(cp->dev);
-               if (cp->opened && netif_msg_link(cp))
-                       printk(KERN_INFO "%s: Link down\n",
-                              cp->dev->name);
+               if (cp->opened)
+                       netif_info(cp, link, cp->dev, "Link down\n");
                restart = 1;
-               
+
        } else if (++cp->timer_ticks > 10)
                cas_mdio_link_not_up(cp);
-               
+
        return restart;
 }
 
@@ -1693,23 +1716,23 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
        if (!stat)
                return 0;
 
-       printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
-              readl(cp->regs + REG_BIM_DIAG));
+       netdev_err(dev, "PCI error [%04x:%04x]",
+                  stat, readl(cp->regs + REG_BIM_DIAG));
 
        /* cassini+ has this reserved */
        if ((stat & PCI_ERR_BADACK) &&
            ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
-               printk("<No ACK64# during ABS64 cycle> ");
+               pr_cont(" <No ACK64# during ABS64 cycle>");
 
        if (stat & PCI_ERR_DTRTO)
-               printk("<Delayed transaction timeout> ");
+               pr_cont(" <Delayed transaction timeout>");
        if (stat & PCI_ERR_OTHER)
-               printk("<other> ");
+               pr_cont(" <other>");
        if (stat & PCI_ERR_BIM_DMA_WRITE)
-               printk("<BIM DMA 0 write req> ");
+               pr_cont(" <BIM DMA 0 write req>");
        if (stat & PCI_ERR_BIM_DMA_READ)
-               printk("<BIM DMA 0 read req> ");
-       printk("\n");
+               pr_cont(" <BIM DMA 0 read req>");
+       pr_cont("\n");
 
        if (stat & PCI_ERR_OTHER) {
                u16 cfg;
@@ -1718,25 +1741,19 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
                 * true cause.
                 */
                pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
-               printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
-                      dev->name, cfg);
+               netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
                if (cfg & PCI_STATUS_PARITY)
-                       printk(KERN_ERR "%s: PCI parity error detected.\n",
-                              dev->name);
+                       netdev_err(dev, "PCI parity error detected\n");
                if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
-                       printk(KERN_ERR "%s: PCI target abort.\n",
-                              dev->name);
+                       netdev_err(dev, "PCI target abort\n");
                if (cfg & PCI_STATUS_REC_TARGET_ABORT)
-                       printk(KERN_ERR "%s: PCI master acks target abort.\n",
-                              dev->name);
+                       netdev_err(dev, "PCI master acks target abort\n");
                if (cfg & PCI_STATUS_REC_MASTER_ABORT)
-                       printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
+                       netdev_err(dev, "PCI master abort\n");
                if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
-                       printk(KERN_ERR "%s: PCI system error SERR#.\n",
-                              dev->name);
+                       netdev_err(dev, "PCI system error SERR#\n");
                if (cfg & PCI_STATUS_DETECTED_PARITY)
-                       printk(KERN_ERR "%s: PCI parity error.\n",
-                              dev->name);
+                       netdev_err(dev, "PCI parity error\n");
 
                /* Write the error bits back to clear them. */
                cfg &= (PCI_STATUS_PARITY |
@@ -1762,9 +1779,8 @@ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
 {
        if (status & INTR_RX_TAG_ERROR) {
                /* corrupt RX tag framing */
-               if (netif_msg_rx_err(cp))
-                       printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
-                               cp->dev->name);
+               netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+                            "corrupt rx tag framing\n");
                spin_lock(&cp->stat_lock[0]);
                cp->net_stats[0].rx_errors++;
                spin_unlock(&cp->stat_lock[0]);
@@ -1773,9 +1789,8 @@ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
 
        if (status & INTR_RX_LEN_MISMATCH) {
                /* length mismatch. */
-               if (netif_msg_rx_err(cp))
-                       printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
-                               cp->dev->name);
+               netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+                            "length mismatch for rx frame\n");
                spin_lock(&cp->stat_lock[0]);
                cp->net_stats[0].rx_errors++;
                spin_unlock(&cp->stat_lock[0]);
@@ -1817,12 +1832,11 @@ do_reset:
 #if 1
        atomic_inc(&cp->reset_task_pending);
        atomic_inc(&cp->reset_task_pending_all);
-       printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
-              dev->name, status);
+       netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
        schedule_work(&cp->reset_task);
 #else
        atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
-       printk(KERN_ERR "reset called in cas_abnormal_irq\n");
+       netdev_err(dev, "reset called in cas_abnormal_irq\n");
        schedule_work(&cp->reset_task);
 #endif
        return 1;
@@ -1876,13 +1890,12 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
                if (count < 0)
                        break;
 
-               if (netif_msg_tx_done(cp))
-                       printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
-                              cp->dev->name, ring, entry);
+               netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
+                            "tx[%d] done, slot %d\n", ring, entry);
 
                skbs[entry] = NULL;
                cp->tx_tiny_use[ring][entry].nbufs = 0;
-               
+
                for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
                        struct cas_tx_desc *txd = txds + entry;
 
@@ -1897,7 +1910,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
                        if (cp->tx_tiny_use[ring][entry].used) {
                                cp->tx_tiny_use[ring][entry].used = 0;
                                entry = TX_DESC_NEXT(ring, entry);
-                       } 
+                       }
                }
 
                spin_lock(&cp->stat_lock[ring]);
@@ -1925,9 +1938,9 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
 #ifdef USE_TX_COMPWB
        u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
 #endif
-       if (netif_msg_intr(cp))
-               printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n",
-                       cp->dev->name, status, compwb);
+       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+                    "tx interrupt, status: 0x%x, %llx\n",
+                    status, (unsigned long long)compwb);
        /* process all the rings */
        for (ring = 0; ring < N_TX_RINGS; ring++) {
 #ifdef USE_TX_COMPWB
@@ -1938,14 +1951,14 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
 #else
                limit = readl(cp->regs + REG_TX_COMPN(ring));
 #endif
-               if (cp->tx_old[ring] != limit) 
+               if (cp->tx_old[ring] != limit)
                        cas_tx_ringN(cp, ring, limit);
        }
 }
 
 
-static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, 
-                             int entry, const u64 *words, 
+static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
+                             int entry, const u64 *words,
                              struct sk_buff **skbref)
 {
        int dlen, hlen, len, i, alloclen;
@@ -1953,23 +1966,23 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
        struct cas_page *page;
        struct sk_buff *skb;
        void *addr, *crcaddr;
-       char *p; 
+       __sum16 csum;
+       char *p;
 
        hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
        dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
        len  = hlen + dlen;
 
-       if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) 
+       if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
                alloclen = len;
-       else 
+       else
                alloclen = max(hlen, RX_COPY_MIN);
 
        skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
-       if (skb == NULL) 
+       if (skb == NULL)
                return -1;
 
        *skbref = skb;
-       skb->dev = cp->dev;
        skb_reserve(skb, swivel);
 
        p = skb->data;
@@ -1977,7 +1990,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
        if (hlen) { /* always copy header pages */
                i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
                page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-               off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + 
+               off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
                        swivel;
 
                i = hlen;
@@ -1993,7 +2006,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                RX_USED_ADD(page, 0x100);
                p += hlen;
                swivel = 0;
-       } 
+       }
 
 
        if (alloclen < (hlen + dlen)) {
@@ -2006,10 +2019,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 
                hlen = min(cp->page_size - off, dlen);
                if (hlen < 0) {
-                       if (netif_msg_rx_err(cp)) {
-                               printk(KERN_DEBUG "%s: rx page overflow: "
-                                      "%d\n", cp->dev->name, hlen);
-                       }
+                       netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+                                    "rx page overflow: %d\n", hlen);
                        dev_kfree_skb_irq(skb);
                        return -1;
                }
@@ -2037,13 +2048,14 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 
                skb_shinfo(skb)->nr_frags++;
                skb->data_len += hlen - swivel;
+               skb->truesize += hlen - swivel;
                skb->len      += hlen - swivel;
 
                get_page(page->buffer);
                frag->page = page->buffer;
                frag->page_offset = off;
                frag->size = hlen - swivel;
-               
+
                /* any more data? */
                if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
                        hlen = dlen;
@@ -2051,8 +2063,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 
                        i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
                        page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-                       pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, 
-                                           hlen + cp->crc_size, 
+                       pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+                                           hlen + cp->crc_size,
                                            PCI_DMA_FROMDEVICE);
                        pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
                                            hlen + cp->crc_size,
@@ -2060,7 +2072,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 
                        skb_shinfo(skb)->nr_frags++;
                        skb->data_len += hlen;
-                       skb->len      += hlen; 
+                       skb->len      += hlen;
                        frag++;
 
                        get_page(page->buffer);
@@ -2085,10 +2097,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
                hlen = min(cp->page_size - off, dlen);
                if (hlen < 0) {
-                       if (netif_msg_rx_err(cp)) {
-                               printk(KERN_DEBUG "%s: rx page overflow: "
-                                      "%d\n", cp->dev->name, hlen);
-                       }
+                       netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+                                    "rx page overflow: %d\n", hlen);
                        dev_kfree_skb_irq(skb);
                        return -1;
                }
@@ -2106,14 +2116,14 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                        RX_USED_ADD(page, cp->mtu_stride);
                else
                        RX_USED_ADD(page, i);
-       
+
                /* any more data? */
                if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
                        p += hlen;
                        i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
                        page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-                       pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, 
-                                           dlen + cp->crc_size, 
+                       pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+                                           dlen + cp->crc_size,
                                            PCI_DMA_FROMDEVICE);
                        addr = cas_page_map(page->buffer);
                        memcpy(p, addr, dlen + cp->crc_size);
@@ -2121,7 +2131,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                                            dlen + cp->crc_size,
                                            PCI_DMA_FROMDEVICE);
                        cas_page_unmap(addr);
-                       RX_USED_ADD(page, dlen + cp->crc_size); 
+                       RX_USED_ADD(page, dlen + cp->crc_size);
                }
 end_copy_pkt:
                if (cp->crc_size) {
@@ -2131,22 +2141,26 @@ end_copy_pkt:
                skb_put(skb, alloclen);
        }
 
-       i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
+       csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
        if (cp->crc_size) {
                /* checksum includes FCS. strip it out. */
-               i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
+               csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
+                                             csum_unfold(csum)));
                if (addr)
                        cas_page_unmap(addr);
        }
-       skb->csum = ntohs(i ^ 0xffff);
-       skb->ip_summed = CHECKSUM_HW;
        skb->protocol = eth_type_trans(skb, cp->dev);
+       if (skb->protocol == htons(ETH_P_IP)) {
+               skb->csum = csum_unfold(~csum);
+               skb->ip_summed = CHECKSUM_COMPLETE;
+       } else
+               skb->ip_summed = CHECKSUM_NONE;
        return len;
 }
 
 
 /* we can handle up to 64 rx flows at a time. we do the same thing
- * as nonreassm except that we batch up the buffers. 
+ * as nonreassm except that we batch up the buffers.
  * NOTE: we currently just treat each flow as a bunch of packets that
  *       we pass up. a better way would be to coalesce the packets
  *       into a jumbo packet. to do that, we need to do the following:
@@ -2156,7 +2170,7 @@ end_copy_pkt:
  *          data length and merge the checksums.
  *       3) on flow release, fix up the header.
  *       4) make sure the higher layer doesn't care.
- * because packets get coalesced, we shouldn't run into fragment count 
+ * because packets get coalesced, we shouldn't run into fragment count
  * issues.
  */
 static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
@@ -2164,12 +2178,12 @@ static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
 {
        int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
        struct sk_buff_head *flow = &cp->rx_flows[flowid];
-       
-       /* this is protected at a higher layer, so no need to 
+
+       /* this is protected at a higher layer, so no need to
         * do any additional locking here. stick the buffer
         * at the end.
         */
-       __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow);
+       __skb_queue_tail(flow, skb);
        if (words[0] & RX_COMP1_RELEASE_FLOW) {
                while ((skb = __skb_dequeue(flow))) {
                        cas_skb_release(skb);
@@ -2190,19 +2204,19 @@ static void cas_post_page(struct cas *cp, const int ring, const int index)
        new = cas_page_swap(cp, ring, index);
        cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
        cp->init_rxds[ring][entry].index  =
-               cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | 
+               cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
                            CAS_BASE(RX_INDEX_RING, ring));
 
        entry = RX_DESC_ENTRY(ring, entry + 1);
        cp->rx_old[ring] = entry;
-       
+
        if (entry % 4)
                return;
 
        if (ring == 0)
                writel(entry, cp->regs + REG_RX_KICK);
        else if ((N_RX_DESC_RINGS > 1) &&
-                (cp->cas_flags & CAS_FLAG_REG_PLUS)) 
+                (cp->cas_flags & CAS_FLAG_REG_PLUS))
                writel(entry, cp->regs + REG_PLUS_RX_KICK1);
 }
 
@@ -2216,12 +2230,11 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
 
        entry = cp->rx_old[ring];
 
-       if (netif_msg_intr(cp))
-               printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
-                      cp->dev->name, ring, entry);
+       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+                    "rxd[%d] interrupt, done: %d\n", ring, entry);
 
        cluster = -1;
-       count = entry & 0x3; 
+       count = entry & 0x3;
        last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
        released = 0;
        while (entry != last) {
@@ -2229,12 +2242,12 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
                if (page_count(page[entry]->buffer) > 1) {
                        cas_page_t *new = cas_page_dequeue(cp);
                        if (!new) {
-                               /* let the timer know that we need to 
+                               /* let the timer know that we need to
                                 * do this again
                                 */
                                cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
                                if (!timer_pending(&cp->link_timer))
-                                       mod_timer(&cp->link_timer, jiffies + 
+                                       mod_timer(&cp->link_timer, jiffies +
                                                  CAS_LINK_FAST_TIMEOUT);
                                cp->rx_old[ring]  = entry;
                                cp->rx_last[ring] = num ? num - released : 0;
@@ -2243,10 +2256,10 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
                        spin_lock(&cp->rx_inuse_lock);
                        list_add(&page[entry]->list, &cp->rx_inuse_list);
                        spin_unlock(&cp->rx_inuse_lock);
-                       cp->init_rxds[ring][entry].buffer = 
+                       cp->init_rxds[ring][entry].buffer =
                                cpu_to_le64(new->dma_addr);
                        page[entry] = new;
-                       
+
                }
 
                if (++count == 4) {
@@ -2258,13 +2271,13 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
        }
        cp->rx_old[ring] = entry;
 
-       if (cluster < 0) 
+       if (cluster < 0)
                return 0;
 
        if (ring == 0)
                writel(cluster, cp->regs + REG_RX_KICK);
        else if ((N_RX_DESC_RINGS > 1) &&
-                (cp->cas_flags & CAS_FLAG_REG_PLUS)) 
+                (cp->cas_flags & CAS_FLAG_REG_PLUS))
                writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
        return 0;
 }
@@ -2273,14 +2286,14 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
 /* process a completion ring. packets are set up in three basic ways:
  * small packets: should be copied header + data in single buffer.
  * large packets: header and data in a single buffer.
- * split packets: header in a separate buffer from data. 
+ * split packets: header in a separate buffer from data.
  *                data may be in multiple pages. data may be > 256
- *                bytes but in a single page. 
+ *                bytes but in a single page.
  *
  * NOTE: RX page posting is done in this routine as well. while there's
  *       the capability of using multiple RX completion rings, it isn't
  *       really worthwhile due to the fact that the page posting will
- *       force serialization on the single descriptor ring. 
+ *       force serialization on the single descriptor ring.
  */
 static int cas_rx_ringN(struct cas *cp, int ring, int budget)
 {
@@ -2288,17 +2301,16 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
        int entry, drops;
        int npackets = 0;
 
-       if (netif_msg_intr(cp))
-               printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
-                      cp->dev->name, ring,
-                      readl(cp->regs + REG_RX_COMP_HEAD), 
-                      cp->rx_new[ring]);
+       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+                    "rx[%d] interrupt, done: %d/%d\n",
+                    ring,
+                    readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
 
        entry = cp->rx_new[ring];
        drops = 0;
        while (1) {
                struct cas_rx_comp *rxc = rxcs + entry;
-               struct sk_buff *skb;
+               struct sk_buff *uninitialized_var(skb);
                int type, len;
                u64 words[4];
                int i, dring;
@@ -2347,7 +2359,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
                 */
                if (RX_DONT_BATCH || (type == 0x2)) {
                        /* non-reassm: these always get released */
-                       cas_skb_release(skb); 
+                       cas_skb_release(skb);
                } else {
                        cas_rx_flow_pkt(cp, words, skb);
                }
@@ -2356,7 +2368,6 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
                cp->net_stats[ring].rx_packets++;
                cp->net_stats[ring].rx_bytes += len;
                spin_unlock(&cp->stat_lock[ring]);
-               cp->dev->last_rx = jiffies;
 
        next:
                npackets++;
@@ -2368,7 +2379,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
                        i = CAS_VAL(RX_INDEX_NUM, i);
                        cas_post_page(cp, dring, i);
                }
-               
+
                if (words[0] & RX_COMP1_RELEASE_DATA) {
                        i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
                        dring = CAS_VAL(RX_INDEX_RING, i);
@@ -2384,7 +2395,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
                }
 
                /* skip to the next entry */
-               entry = RX_COMP_ENTRY(ring, entry + 1 + 
+               entry = RX_COMP_ENTRY(ring, entry + 1 +
                                      CAS_VAL(RX_COMP1_SKIP, words[0]));
 #ifdef USE_NAPI
                if (budget && (npackets >= budget))
@@ -2394,8 +2405,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
        cp->rx_new[ring] = entry;
 
        if (drops)
-               printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
-                      cp->dev->name);
+               netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
        return npackets;
 }
 
@@ -2408,12 +2418,11 @@ static void cas_post_rxcs_ringN(struct net_device *dev,
        int last, entry;
 
        last = cp->rx_cur[ring];
-       entry = cp->rx_new[ring]; 
-       if (netif_msg_intr(cp))
-               printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
-                      dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
-                      entry);
-       
+       entry = cp->rx_new[ring];
+       netif_printk(cp, intr, KERN_DEBUG, dev,
+                    "rxc[%d] interrupt, done: %d/%d\n",
+                    ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
+
        /* zero and re-mark descriptors */
        while (last != entry) {
                cas_rxc_init(rxc + last);
@@ -2423,25 +2432,25 @@ static void cas_post_rxcs_ringN(struct net_device *dev,
 
        if (ring == 0)
                writel(last, cp->regs + REG_RX_COMP_TAIL);
-       else if (cp->cas_flags & CAS_FLAG_REG_PLUS) 
+       else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
                writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
 }
 
 
 
-/* cassini can use all four PCI interrupts for the completion ring. 
+/* cassini can use all four PCI interrupts for the completion ring.
  * rings 3 and 4 are identical
  */
 #if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
-static inline void cas_handle_irqN(struct net_device *dev, 
+static inline void cas_handle_irqN(struct net_device *dev,
                                   struct cas *cp, const u32 status,
                                   const int ring)
 {
-       if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT)) 
+       if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
                cas_post_rxcs_ringN(dev, cp, ring);
 }
 
-static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t cas_interruptN(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct cas *cp = netdev_priv(dev);
@@ -2458,7 +2467,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs)
        if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
 #ifdef USE_NAPI
                cas_mask_intr(cp);
-               netif_rx_schedule(dev);
+               napi_schedule(&cp->napi);
 #else
                cas_rx_ringN(cp, ring, 0);
 #endif
@@ -2477,7 +2486,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs)
 static inline void cas_handle_irq1(struct cas *cp, const u32 status)
 {
        if (status & INTR_RX_BUF_UNAVAIL_1) {
-               /* Frame arrived, no free RX buffers available. 
+               /* Frame arrived, no free RX buffers available.
                 * NOTE: we can get this on a link transition. */
                cas_post_rxds_ringN(cp, 1, 0);
                spin_lock(&cp->stat_lock[1]);
@@ -2485,8 +2494,8 @@ static inline void cas_handle_irq1(struct cas *cp, const u32 status)
                spin_unlock(&cp->stat_lock[1]);
        }
 
-       if (status & INTR_RX_BUF_AE_1) 
-               cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - 
+       if (status & INTR_RX_BUF_AE_1)
+               cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
                                    RX_AE_FREEN_VAL(1));
 
        if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
@@ -2494,7 +2503,7 @@ static inline void cas_handle_irq1(struct cas *cp, const u32 status)
 }
 
 /* ring 2 handles a few more events than 3 and 4 */
-static irqreturn_t cas_interrupt1(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t cas_interrupt1(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct cas *cp = netdev_priv(dev);
@@ -2509,7 +2518,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id, struct pt_regs *regs)
        if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
 #ifdef USE_NAPI
                cas_mask_intr(cp);
-               netif_rx_schedule(dev);
+               napi_schedule(&cp->napi);
 #else
                cas_rx_ringN(cp, 1, 0);
 #endif
@@ -2530,7 +2539,7 @@ static inline void cas_handle_irq(struct net_device *dev,
                cas_abnormal_irq(dev, cp, status);
 
        if (status & INTR_RX_BUF_UNAVAIL) {
-               /* Frame arrived, no free RX buffers available. 
+               /* Frame arrived, no free RX buffers available.
                 * NOTE: we can get this on a link transition.
                 */
                cas_post_rxds_ringN(cp, 0, 0);
@@ -2546,7 +2555,7 @@ static inline void cas_handle_irq(struct net_device *dev,
                cas_post_rxcs_ringN(dev, cp, 0);
 }
 
-static irqreturn_t cas_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t cas_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct cas *cp = netdev_priv(dev);
@@ -2565,7 +2574,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id, struct pt_regs *regs)
        if (status & INTR_RX_DONE) {
 #ifdef USE_NAPI
                cas_mask_intr(cp);
-               netif_rx_schedule(dev);
+               napi_schedule(&cp->napi);
 #else
                cas_rx_ringN(cp, 0, 0);
 #endif
@@ -2580,10 +2589,11 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 
 #ifdef USE_NAPI
-static int cas_poll(struct net_device *dev, int *budget)
+static int cas_poll(struct napi_struct *napi, int budget)
 {
-       struct cas *cp = netdev_priv(dev);
-       int i, enable_intr, todo, credits;
+       struct cas *cp = container_of(napi, struct cas, napi);
+       struct net_device *dev = cp->dev;
+       int i, enable_intr, credits;
        u32 status = readl(cp->regs + REG_INTR_STATUS);
        unsigned long flags;
 
@@ -2593,20 +2603,18 @@ static int cas_poll(struct net_device *dev, int *budget)
 
        /* NAPI rx packets. we spread the credits across all of the
         * rxc rings
-        */
-       todo = min(*budget, dev->quota);
-
-       /* to make sure we're fair with the work we loop through each
-        * ring N_RX_COMP_RING times with a request of 
-        * todo / N_RX_COMP_RINGS
+        *
+        * to make sure we're fair with the work we loop through each
+        * ring N_RX_COMP_RING times with a request of
+        * budget / N_RX_COMP_RINGS
         */
        enable_intr = 1;
        credits = 0;
        for (i = 0; i < N_RX_COMP_RINGS; i++) {
                int j;
                for (j = 0; j < N_RX_COMP_RINGS; j++) {
-                       credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS);
-                       if (credits >= todo) {
+                       credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
+                       if (credits >= budget) {
                                enable_intr = 0;
                                goto rx_comp;
                        }
@@ -2614,9 +2622,6 @@ static int cas_poll(struct net_device *dev, int *budget)
        }
 
 rx_comp:
-       *budget    -= credits;
-       dev->quota -= credits;
-
        /* final rx completion */
        spin_lock_irqsave(&cp->lock, flags);
        if (status)
@@ -2647,11 +2652,10 @@ rx_comp:
 #endif
        spin_unlock_irqrestore(&cp->lock, flags);
        if (enable_intr) {
-               netif_rx_complete(dev);
+               napi_complete(napi);
                cas_unmask_intr(cp);
-               return 0;
        }
-       return 1;
+       return credits;
 }
 #endif
 
@@ -2661,7 +2665,7 @@ static void cas_netpoll(struct net_device *dev)
        struct cas *cp = netdev_priv(dev);
 
        cas_disable_irq(cp, 0);
-       cas_interrupt(cp->pdev->irq, dev, NULL);
+       cas_interrupt(cp->pdev->irq, dev);
        cas_enable_irq(cp, 0);
 
 #ifdef USE_PCI_INTB
@@ -2686,42 +2690,38 @@ static void cas_tx_timeout(struct net_device *dev)
 {
        struct cas *cp = netdev_priv(dev);
 
-       printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+       netdev_err(dev, "transmit timed out, resetting\n");
        if (!cp->hw_running) {
-               printk("%s: hrm.. hw not running!\n", dev->name);
+               netdev_err(dev, "hrm.. hw not running!\n");
                return;
        }
 
-       printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
-              dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
-
-       printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
-              dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
-
-       printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
-              "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
-              dev->name,
-              readl(cp->regs + REG_TX_CFG),
-              readl(cp->regs + REG_MAC_TX_STATUS),
-              readl(cp->regs + REG_MAC_TX_CFG),
-              readl(cp->regs + REG_TX_FIFO_PKT_CNT),
-              readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
-              readl(cp->regs + REG_TX_FIFO_READ_PTR),
-              readl(cp->regs + REG_TX_SM_1),
-              readl(cp->regs + REG_TX_SM_2));
-
-       printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
-              dev->name,
-              readl(cp->regs + REG_RX_CFG),
-              readl(cp->regs + REG_MAC_RX_STATUS),
-              readl(cp->regs + REG_MAC_RX_CFG));
-
-       printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
-              dev->name,
-              readl(cp->regs + REG_HP_STATE_MACHINE),
-              readl(cp->regs + REG_HP_STATUS0),
-              readl(cp->regs + REG_HP_STATUS1),
-              readl(cp->regs + REG_HP_STATUS2));
+       netdev_err(dev, "MIF_STATE[%08x]\n",
+                  readl(cp->regs + REG_MIF_STATE_MACHINE));
+
+       netdev_err(dev, "MAC_STATE[%08x]\n",
+                  readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+       netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
+                  readl(cp->regs + REG_TX_CFG),
+                  readl(cp->regs + REG_MAC_TX_STATUS),
+                  readl(cp->regs + REG_MAC_TX_CFG),
+                  readl(cp->regs + REG_TX_FIFO_PKT_CNT),
+                  readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
+                  readl(cp->regs + REG_TX_FIFO_READ_PTR),
+                  readl(cp->regs + REG_TX_SM_1),
+                  readl(cp->regs + REG_TX_SM_2));
+
+       netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+                  readl(cp->regs + REG_RX_CFG),
+                  readl(cp->regs + REG_MAC_RX_STATUS),
+                  readl(cp->regs + REG_MAC_RX_CFG));
+
+       netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
+                  readl(cp->regs + REG_HP_STATE_MACHINE),
+                  readl(cp->regs + REG_HP_STATUS0),
+                  readl(cp->regs + REG_HP_STATUS1),
+                  readl(cp->regs + REG_HP_STATUS2));
 
 #if 1
        atomic_inc(&cp->reset_task_pending);
@@ -2756,13 +2756,13 @@ static void cas_write_txd(struct cas *cp, int ring, int entry,
        txd->buffer = cpu_to_le64(mapping);
 }
 
-static inline void *tx_tiny_buf(struct cas *cp, const int ring, 
+static inline void *tx_tiny_buf(struct cas *cp, const int ring,
                                const int entry)
 {
        return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
 }
 
-static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, 
+static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
                                     const int entry, const int tentry)
 {
        cp->tx_tiny_use[ring][tentry].nbufs++;
@@ -2770,7 +2770,7 @@ static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
        return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
 }
 
-static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, 
+static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
                                    struct sk_buff *skb)
 {
        struct net_device *dev = cp->dev;
@@ -2783,23 +2783,20 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
        spin_lock_irqsave(&cp->tx_lock[ring], flags);
 
        /* This is a hard error, log it. */
-       if (TX_BUFFS_AVAIL(cp, ring) <= 
+       if (TX_BUFFS_AVAIL(cp, ring) <=
            CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
-               printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
-                      "queue awake!\n", dev->name);
+               netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                return 1;
        }
 
        ctrl = 0;
-       if (skb->ip_summed == CHECKSUM_HW) {
-               u64 csum_start_off, csum_stuff_off;
-
-               csum_start_off = (u64) (skb->h.raw - skb->data);
-               csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               const u64 csum_start_off = skb_transport_offset(skb);
+               const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
 
-               ctrl =  TX_DESC_CSUM_EN | 
+               ctrl =  TX_DESC_CSUM_EN |
                        CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
                        CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
        }
@@ -2817,17 +2814,17 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
        tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
        if (unlikely(tabort)) {
                /* NOTE: len is always >  tabort */
-               cas_write_txd(cp, ring, entry, mapping, len - tabort, 
+               cas_write_txd(cp, ring, entry, mapping, len - tabort,
                              ctrl | TX_DESC_SOF, 0);
                entry = TX_DESC_NEXT(ring, entry);
 
-               memcpy(tx_tiny_buf(cp, ring, entry), skb->data + 
-                      len - tabort, tabort);
+               skb_copy_from_linear_data_offset(skb, len - tabort,
+                             tx_tiny_buf(cp, ring, entry), tabort);
                mapping = tx_tiny_map(cp, ring, entry, tentry);
                cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
                              (nr_frags == 0));
        } else {
-               cas_write_txd(cp, ring, entry, mapping, len, ctrl | 
+               cas_write_txd(cp, ring, entry, mapping, len, ctrl |
                              TX_DESC_SOF, (nr_frags == 0));
        }
        entry = TX_DESC_NEXT(ring, entry);
@@ -2848,10 +2845,10 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
                        cas_write_txd(cp, ring, entry, mapping, len - tabort,
                                      ctrl, 0);
                        entry = TX_DESC_NEXT(ring, entry);
-                       
+
                        addr = cas_page_map(fragp->page);
                        memcpy(tx_tiny_buf(cp, ring, entry),
-                              addr + fragp->page_offset + len - tabort, 
+                              addr + fragp->page_offset + len - tabort,
                               tabort);
                        cas_page_unmap(addr);
                        mapping = tx_tiny_map(cp, ring, entry, tentry);
@@ -2867,36 +2864,32 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
        if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
                netif_stop_queue(dev);
 
-       if (netif_msg_tx_queued(cp))
-               printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
-                      "avail %d\n",
-                      dev->name, ring, entry, skb->len, 
-                      TX_BUFFS_AVAIL(cp, ring));
+       netif_printk(cp, tx_queued, KERN_DEBUG, dev,
+                    "tx[%d] queued, slot %d, skblen %d, avail %d\n",
+                    ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
        writel(entry, cp->regs + REG_TX_KICKN(ring));
        spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
        return 0;
-} 
+}
 
-static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct cas *cp = netdev_priv(dev);
 
        /* this is only used as a load-balancing hint, so it doesn't
         * need to be SMP safe
         */
-       static int ring; 
+       static int ring;
 
-       skb = skb_padto(skb, cp->min_frame_size);
-       if (!skb)
-               return 0;
+       if (skb_padto(skb, cp->min_frame_size))
+               return NETDEV_TX_OK;
 
        /* XXX: we need some higher-level QoS hooks to steer packets to
         *      individual queues.
         */
        if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
-               return 1;
-       dev->trans_start = jiffies;
-       return 0;
+               return NETDEV_TX_BUSY;
+       return NETDEV_TX_OK;
 }
 
 static void cas_init_tx_dma(struct cas *cp)
@@ -2916,14 +2909,14 @@ static void cas_init_tx_dma(struct cas *cp)
        /* enable completion writebacks, enable paced mode,
         * disable read pipe, and disable pre-interrupt compwbs
         */
-       val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | 
+       val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
                TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
-               TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | 
+               TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
                TX_CFG_INTR_COMPWB_DIS;
 
        /* write out tx ring info and tx desc bases */
        for (i = 0; i < MAX_TX_RINGS; i++) {
-               off = (unsigned long) cp->init_txds[i] - 
+               off = (unsigned long) cp->init_txds[i] -
                        (unsigned long) cp->init_block;
 
                val |= CAS_TX_RINGN_BASE(i);
@@ -2959,12 +2952,46 @@ static inline void cas_init_dma(struct cas *cp)
        cas_init_rx_dma(cp);
 }
 
+static void cas_process_mc_list(struct cas *cp)
+{
+       u16 hash_table[16];
+       u32 crc;
+       struct netdev_hw_addr *ha;
+       int i = 1;
+
+       memset(hash_table, 0, sizeof(hash_table));
+       netdev_for_each_mc_addr(ha, cp->dev) {
+               if (i <= CAS_MC_EXACT_MATCH_SIZE) {
+                       /* use the alternate mac address registers for the
+                        * first 15 multicast addresses
+                        */
+                       writel((ha->addr[4] << 8) | ha->addr[5],
+                              cp->regs + REG_MAC_ADDRN(i*3 + 0));
+                       writel((ha->addr[2] << 8) | ha->addr[3],
+                              cp->regs + REG_MAC_ADDRN(i*3 + 1));
+                       writel((ha->addr[0] << 8) | ha->addr[1],
+                              cp->regs + REG_MAC_ADDRN(i*3 + 2));
+                       i++;
+               }
+               else {
+                       /* use hw hash table for the next series of
+                        * multicast addresses
+                        */
+                       crc = ether_crc_le(ETH_ALEN, ha->addr);
+                       crc >>= 24;
+                       hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+               }
+       }
+       for (i = 0; i < 16; i++)
+               writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
+}
+
 /* Must be invoked under cp->lock. */
 static u32 cas_setup_multicast(struct cas *cp)
 {
        u32 rxcfg = 0;
        int i;
-       
+
        if (cp->dev->flags & IFF_PROMISC) {
                rxcfg |= MAC_RX_CFG_PROMISC_EN;
 
@@ -2974,43 +3001,7 @@ static u32 cas_setup_multicast(struct cas *cp)
                rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
 
        } else {
-               u16 hash_table[16];
-               u32 crc;
-               struct dev_mc_list *dmi = cp->dev->mc_list;
-               int i;
-
-               /* use the alternate mac address registers for the
-                * first 15 multicast addresses
-                */
-               for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
-                       if (!dmi) {
-                               writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
-                               writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
-                               writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
-                               continue;
-                       }
-                       writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5], 
-                              cp->regs + REG_MAC_ADDRN(i*3 + 0));
-                       writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], 
-                              cp->regs + REG_MAC_ADDRN(i*3 + 1));
-                       writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], 
-                              cp->regs + REG_MAC_ADDRN(i*3 + 2));
-                       dmi = dmi->next;
-               }
-
-               /* use hw hash table for the next series of 
-                * multicast addresses
-                */
-               memset(hash_table, 0, sizeof(hash_table));
-               while (dmi) {
-                       crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
-                       crc >>= 24;
-                       hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
-                       dmi = dmi->next;
-               }
-               for (i=0; i < 16; i++)
-                       writel(hash_table[i], cp->regs + 
-                              REG_MAC_HASH_TABLEN(i));
+               cas_process_mc_list(cp);
                rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
        }
 
@@ -3060,10 +3051,10 @@ static void cas_mac_reset(struct cas *cp)
 
        if (readl(cp->regs + REG_MAC_TX_RESET) |
            readl(cp->regs + REG_MAC_RX_RESET))
-               printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
-                      cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
-                      readl(cp->regs + REG_MAC_RX_RESET),
-                      readl(cp->regs + REG_MAC_STATE_MACHINE));
+               netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
+                          readl(cp->regs + REG_MAC_TX_RESET),
+                          readl(cp->regs + REG_MAC_RX_RESET),
+                          readl(cp->regs + REG_MAC_STATE_MACHINE));
 }
 
 
@@ -3094,23 +3085,23 @@ static void cas_init_mac(struct cas *cp)
        writel(0x00, cp->regs + REG_MAC_IPG0);
        writel(0x08, cp->regs + REG_MAC_IPG1);
        writel(0x04, cp->regs + REG_MAC_IPG2);
-       
+
        /* change later for 802.3z */
-       writel(0x40, cp->regs + REG_MAC_SLOT_TIME); 
+       writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
 
        /* min frame + FCS */
        writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
 
        /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
-        * specify the maximum frame size to prevent RX tag errors on 
+        * specify the maximum frame size to prevent RX tag errors on
         * oversized frames.
         */
        writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
-              CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, 
-                       (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), 
+              CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
+                       (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
               cp->regs + REG_MAC_FRAMESIZE_MAX);
 
-       /* NOTE: crc_size is used as a surrogate for half-duplex. 
+       /* NOTE: crc_size is used as a surrogate for half-duplex.
         * workaround saturn half-duplex issue by increasing preamble
         * size to 65 bytes.
         */
@@ -3153,7 +3144,7 @@ static void cas_init_mac(struct cas *cp)
         * spin_lock_irqsave, but we are called only in cas_init_hw and
         * cas_init_hw is protected by cas_lock_all, which calls
         * spin_lock_irq (so it doesn't need to save the flags, and
-        * we should be OK for the writel, as that is the only 
+        * we should be OK for the writel, as that is the only
         * difference).
         */
        cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
@@ -3202,7 +3193,7 @@ static int cas_vpd_match(const void __iomem *p, const char *str)
 {
        int len = strlen(str) + 1;
        int i;
-       
+
        for (i = 0; i < len; i++) {
                if (readb(p + i) != str[i])
                        return 0;
@@ -3219,7 +3210,7 @@ static int cas_vpd_match(const void __iomem *p, const char *str)
  *     number.
  *  3) fiber cards don't have bridges, so their slot numbers don't
  *     mean anything.
- *  4) we don't actually know we have a fiber card until after 
+ *  4) we don't actually know we have a fiber card until after
  *     the mac addresses are parsed.
  */
 static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
@@ -3251,15 +3242,15 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
                    (readb(p + i + 1) == 0x43) &&
                    (readb(p + i + 2) == 0x49) &&
                    (readb(p + i + 3) == 0x52)) {
-                       base = p + (readb(p + i + 8) | 
+                       base = p + (readb(p + i + 8) |
                                    (readb(p + i + 9) << 8));
                        break;
-               }               
+               }
        }
 
        if (!base || (readb(base) != 0x82))
                goto use_random_mac_addr;
-       
+
        i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
        while (i < EXPANSION_ROM_SIZE) {
                if (readb(base + i) != 0x90) /* no vpd found */
@@ -3277,20 +3268,20 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
                        char type;
 
                        p += 3;
-                       
+
                        /* look for the following things:
                         * -- correct length == 29
-                        * 3 (type) + 2 (size) + 
-                        * 18 (strlen("local-mac-address") + 1) + 
-                        * 6 (mac addr) 
+                        * 3 (type) + 2 (size) +
+                        * 18 (strlen("local-mac-address") + 1) +
+                        * 6 (mac addr)
                         * -- VPD Instance 'I'
                         * -- VPD Type Bytes 'B'
                         * -- VPD data length == 6
                         * -- property string == local-mac-address
-                        * 
+                        *
                         * -- correct length == 24
-                        * 3 (type) + 2 (size) + 
-                        * 12 (strlen("entropy-dev") + 1) + 
+                        * 3 (type) + 2 (size) +
+                        * 12 (strlen("entropy-dev") + 1) +
                         * 7 (strlen("vms110") + 1)
                         * -- VPD Instance 'I'
                         * -- VPD Type String 'B'
@@ -3298,17 +3289,17 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
                         * -- property string == entropy-dev
                         *
                         * -- correct length == 18
-                        * 3 (type) + 2 (size) + 
-                        * 9 (strlen("phy-type") + 1) + 
+                        * 3 (type) + 2 (size) +
+                        * 9 (strlen("phy-type") + 1) +
                         * 4 (strlen("pcs") + 1)
                         * -- VPD Instance 'I'
                         * -- VPD Type String 'S'
                         * -- VPD data length == 4
                         * -- property string == phy-type
-                        * 
+                        *
                         * -- correct length == 23
-                        * 3 (type) + 2 (size) + 
-                        * 14 (strlen("phy-interface") + 1) + 
+                        * 3 (type) + 2 (size) +
+                        * 14 (strlen("phy-interface") + 1) +
                         * 4 (strlen("pcs") + 1)
                         * -- VPD Instance 'I'
                         * -- VPD Type String 'S'
@@ -3322,14 +3313,14 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
                        type = readb(p + 3);
                        if (type == 'B') {
                                if ((klen == 29) && readb(p + 4) == 6 &&
-                                   cas_vpd_match(p + 5, 
+                                   cas_vpd_match(p + 5,
                                                  "local-mac-address")) {
-                                       if (mac_off++ > offset) 
+                                       if (mac_off++ > offset)
                                                goto next;
 
                                        /* set mac address */
-                                       for (j = 0; j < 6; j++) 
-                                               dev_addr[j] = 
+                                       for (j = 0; j < 6; j++)
+                                               dev_addr[j] =
                                                        readb(p + 23 + j);
                                        goto found_mac;
                                }
@@ -3339,7 +3330,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
                                goto next;
 
 #ifdef USE_ENTROPY_DEV
-                       if ((klen == 24) && 
+                       if ((klen == 24) &&
                            cas_vpd_match(p + 5, "entropy-dev") &&
                            cas_vpd_match(p + 17, "vms110")) {
                                cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
@@ -3357,7 +3348,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
                                        goto found_phy;
                                }
                        }
-                       
+
                        if ((klen == 23) && readb(p + 4) == 4 &&
                            cas_vpd_match(p + 5, "phy-interface")) {
                                if (cas_vpd_match(p + 19, "pcs")) {
@@ -3383,7 +3374,7 @@ use_random_mac_addr:
                goto done;
 
        /* Sun MAC prefix then 3 random bytes. */
-       printk(PFX "MAC address not found in ROM VPD\n");
+       pr_info("MAC address not found in ROM VPD\n");
        dev_addr[0] = 0x08;
        dev_addr[1] = 0x00;
        dev_addr[2] = 0x20;
@@ -3398,21 +3389,19 @@ done:
 static void cas_check_pci_invariants(struct cas *cp)
 {
        struct pci_dev *pdev = cp->pdev;
-       u8 rev;
 
        cp->cas_flags = 0;
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
        if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
            (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
-               if (rev >= CAS_ID_REVPLUS)
+               if (pdev->revision >= CAS_ID_REVPLUS)
                        cp->cas_flags |= CAS_FLAG_REG_PLUS;
-               if (rev < CAS_ID_REVPLUS02u)
+               if (pdev->revision < CAS_ID_REVPLUS02u)
                        cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
 
                /* Original Cassini supports HW CSUM, but it's not
                 * enabled by default as it can trigger TX hangs.
                 */
-               if (rev < CAS_ID_REV2)
+               if (pdev->revision < CAS_ID_REV2)
                        cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
        } else {
                /* Only sun has original cassini chips.  */
@@ -3435,18 +3424,18 @@ static int cas_check_invariants(struct cas *cp)
        int i;
 
        /* get page size for rx buffers. */
-       cp->page_order = 0; 
+       cp->page_order = 0;
 #ifdef USE_PAGE_ORDER
        if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
                /* see if we can allocate larger pages */
-               struct page *page = alloc_pages(GFP_ATOMIC, 
-                                               CAS_JUMBO_PAGE_SHIFT - 
+               struct page *page = alloc_pages(GFP_ATOMIC,
+                                               CAS_JUMBO_PAGE_SHIFT -
                                                PAGE_SHIFT);
                if (page) {
                        __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
                        cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
                } else {
-                       printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
+                       printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
                }
        }
 #endif
@@ -3456,15 +3445,15 @@ static int cas_check_invariants(struct cas *cp)
        cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
        cp->rx_fifo_size = RX_FIFO_SIZE;
 
-       /* finish phy determination. MDIO1 takes precedence over MDIO0 if 
+       /* finish phy determination. MDIO1 takes precedence over MDIO0 if
         * they're both connected.
         */
-       cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, 
+       cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
                                        PCI_SLOT(pdev->devfn));
        if (cp->phy_type & CAS_PHY_SERDES) {
                cp->cas_flags |= CAS_FLAG_1000MB_CAP;
                return 0; /* no more checking needed */
-       } 
+       }
 
        /* MII */
        cfg = readl(cp->regs + REG_MIF_CFG);
@@ -3491,14 +3480,14 @@ static int cas_check_invariants(struct cas *cp)
                        }
                }
        }
-       printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
+       pr_err("MII phy did not respond [%08x]\n",
               readl(cp->regs + REG_MIF_STATE_MACHINE));
        return -1;
 
 done:
        /* see if we can do gigabit */
        cfg = cas_phy_read(cp, MII_BMSR);
-       if ((cfg & CAS_BMSR_1000_EXTEND) && 
+       if ((cfg & CAS_BMSR_1000_EXTEND) &&
            cas_phy_read(cp, CAS_MII_1000_EXTEND))
                cp->cas_flags |= CAS_FLAG_1000MB_CAP;
        return 0;
@@ -3510,7 +3499,7 @@ static inline void cas_start_dma(struct cas *cp)
        int i;
        u32 val;
        int txfailed = 0;
-       
+
        /* enable dma */
        val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
        writel(val, cp->regs + REG_TX_CFG);
@@ -3536,21 +3525,19 @@ static inline void cas_start_dma(struct cas *cp)
                val = readl(cp->regs + REG_MAC_RX_CFG);
                if ((val & MAC_RX_CFG_EN)) {
                        if (txfailed) {
-                         printk(KERN_ERR 
-                                "%s: enabling mac failed [tx:%08x:%08x].\n", 
-                                cp->dev->name,
-                                readl(cp->regs + REG_MIF_STATE_MACHINE),
-                                readl(cp->regs + REG_MAC_STATE_MACHINE));
+                               netdev_err(cp->dev,
+                                          "enabling mac failed [tx:%08x:%08x]\n",
+                                          readl(cp->regs + REG_MIF_STATE_MACHINE),
+                                          readl(cp->regs + REG_MAC_STATE_MACHINE));
                        }
                        goto enable_rx_done;
                }
                udelay(10);
        }
-       printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n", 
-              cp->dev->name,
-              (txfailed? "tx,rx":"rx"),
-              readl(cp->regs + REG_MIF_STATE_MACHINE),
-              readl(cp->regs + REG_MAC_STATE_MACHINE));
+       netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
+                  (txfailed ? "tx,rx" : "rx"),
+                  readl(cp->regs + REG_MIF_STATE_MACHINE),
+                  readl(cp->regs + REG_MAC_STATE_MACHINE));
 
 enable_rx_done:
        cas_unmask_intr(cp); /* enable interrupts */
@@ -3558,11 +3545,11 @@ enable_rx_done:
        writel(0, cp->regs + REG_RX_COMP_TAIL);
 
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
-               if (N_RX_DESC_RINGS > 1) 
-                       writel(RX_DESC_RINGN_SIZE(1) - 4, 
+               if (N_RX_DESC_RINGS > 1)
+                       writel(RX_DESC_RINGN_SIZE(1) - 4,
                               cp->regs + REG_PLUS_RX_KICK1);
 
-               for (i = 1; i < N_RX_COMP_RINGS; i++) 
+               for (i = 1; i < N_RX_COMP_RINGS; i++)
                        writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
        }
 }
@@ -3588,7 +3575,7 @@ static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
        *fd = 0;
        *spd = 10;
        *pause = 0;
-       
+
        /* use GMII registers */
        val = cas_phy_read(cp, MII_LPA);
        if (val & CAS_LPA_PAUSE)
@@ -3629,7 +3616,7 @@ static void cas_set_link_modes(struct cas *cp)
                cas_mif_poll(cp, 0);
                val = cas_phy_read(cp, MII_BMCR);
                if (val & BMCR_ANENABLE) {
-                       cas_read_mii_link_mode(cp, &full_duplex, &speed, 
+                       cas_read_mii_link_mode(cp, &full_duplex, &speed,
                                               &pause);
                } else {
                        if (val & BMCR_FULLDPLX)
@@ -3652,9 +3639,8 @@ static void cas_set_link_modes(struct cas *cp)
                }
        }
 
-       if (netif_msg_link(cp))
-               printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
-                      cp->dev->name, speed, (full_duplex ? "full" : "half"));
+       netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
+                  speed, full_duplex ? "full" : "half");
 
        val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
        if (CAS_PHY_MII(cp->phy_type)) {
@@ -3662,7 +3648,7 @@ static void cas_set_link_modes(struct cas *cp)
                if (!full_duplex)
                        val |= MAC_XIF_DISABLE_ECHO;
        }
-       if (full_duplex) 
+       if (full_duplex)
                val |= MAC_XIF_FDPLX_LED;
        if (speed == 1000)
                val |= MAC_XIF_GMII_MODE;
@@ -3682,17 +3668,17 @@ static void cas_set_link_modes(struct cas *cp)
        /* val now set up for REG_MAC_TX_CFG */
 
        /* If gigabit and half-duplex, enable carrier extension
-        * mode.  increase slot time to 512 bytes as well. 
+        * mode.  increase slot time to 512 bytes as well.
         * else, disable it and make sure slot time is 64 bytes.
         * also activate checksum bug workaround
         */
        if ((speed == 1000) && !full_duplex) {
-               writel(val | MAC_TX_CFG_CARRIER_EXTEND, 
+               writel(val | MAC_TX_CFG_CARRIER_EXTEND,
                       cp->regs + REG_MAC_TX_CFG);
 
                val = readl(cp->regs + REG_MAC_RX_CFG);
                val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
-               writel(val | MAC_RX_CFG_CARRIER_EXTEND, 
+               writel(val | MAC_RX_CFG_CARRIER_EXTEND,
                       cp->regs + REG_MAC_RX_CFG);
 
                writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
@@ -3704,7 +3690,7 @@ static void cas_set_link_modes(struct cas *cp)
        } else {
                writel(val, cp->regs + REG_MAC_TX_CFG);
 
-               /* checksum bug workaround. don't strip FCS when in 
+               /* checksum bug workaround. don't strip FCS when in
                 * half-duplex mode
                 */
                val = readl(cp->regs + REG_MAC_RX_CFG);
@@ -3717,25 +3703,21 @@ static void cas_set_link_modes(struct cas *cp)
                        cp->crc_size = 4;
                        cp->min_frame_size = CAS_MIN_FRAME;
                }
-               writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, 
+               writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
                       cp->regs + REG_MAC_RX_CFG);
                writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
        }
 
        if (netif_msg_link(cp)) {
                if (pause & 0x01) {
-                       printk(KERN_INFO "%s: Pause is enabled "
-                              "(rxfifo: %d off: %d on: %d)\n",
-                              cp->dev->name,
-                              cp->rx_fifo_size,
-                              cp->rx_pause_off,
-                              cp->rx_pause_on);
+                       netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+                                   cp->rx_fifo_size,
+                                   cp->rx_pause_off,
+                                   cp->rx_pause_on);
                } else if (pause & 0x10) {
-                       printk(KERN_INFO "%s: TX pause enabled\n",
-                              cp->dev->name);
+                       netdev_info(cp->dev, "TX pause enabled\n");
                } else {
-                       printk(KERN_INFO "%s: Pause is disabled\n",
-                              cp->dev->name);
+                       netdev_info(cp->dev, "Pause is disabled\n");
                }
        }
 
@@ -3745,7 +3727,7 @@ static void cas_set_link_modes(struct cas *cp)
                val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
                if (pause & 0x01) { /* symmetric pause */
                        val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
-               } 
+               }
        }
        writel(val, cp->regs + REG_MAC_CTRL_CFG);
        cas_start_dma(cp);
@@ -3777,7 +3759,7 @@ static void cas_init_hw(struct cas *cp, int restart_link)
  */
 static void cas_hard_reset(struct cas *cp)
 {
-       writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); 
+       writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
        udelay(20);
        pci_restore_state(cp->pdev);
 }
@@ -3795,7 +3777,7 @@ static void cas_global_reset(struct cas *cp, int blkflag)
                 * need some special handling if the chip is set into a
                 * loopback mode.
                 */
-               writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK), 
+               writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
                       cp->regs + REG_SW_RESET);
        } else {
                writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
@@ -3811,20 +3793,20 @@ static void cas_global_reset(struct cas *cp, int blkflag)
                        goto done;
                udelay(10);
        }
-       printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
+       netdev_err(cp->dev, "sw reset failed\n");
 
 done:
        /* enable various BIM interrupts */
-       writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE | 
+       writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
               BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
 
        /* clear out pci error status mask for handled errors.
         * we don't deal with DMA counter overflows as they happen
         * all the time.
         */
-       writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO | 
-                              PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE | 
-                              PCI_ERR_BIM_DMA_READ), cp->regs + 
+       writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
+                              PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
+                              PCI_ERR_BIM_DMA_READ), cp->regs +
               REG_PCI_ERR_STATUS_MASK);
 
        /* set up for MII by default to address mac rx reset timeout
@@ -3865,7 +3847,7 @@ static void cas_reset(struct cas *cp, int blkflag)
        spin_unlock(&cp->stat_lock[N_TX_RINGS]);
 }
 
-/* Shut down the chip, must be called with pm_sem held.  */
+/* Shut down the chip, must be called with pm_mutex held.  */
 static void cas_shutdown(struct cas *cp)
 {
        unsigned long flags;
@@ -3885,7 +3867,7 @@ static void cas_shutdown(struct cas *cp)
 #else
        while (atomic_read(&cp->reset_task_pending))
                schedule();
-#endif 
+#endif
        /* Actually stop the chip */
        cas_lock_all_save(cp, flags);
        cas_reset(cp, 0);
@@ -3915,9 +3897,9 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
        }
        schedule_work(&cp->reset_task);
 #else
-       atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? 
+       atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
                   CAS_RESET_ALL : CAS_RESET_MTU);
-       printk(KERN_ERR "reset called in cas_change_mtu\n");
+       pr_err("reset called in cas_change_mtu\n");
        schedule_work(&cp->reset_task);
 #endif
 
@@ -3949,7 +3931,7 @@ static void cas_clean_txd(struct cas *cp, int ring)
                         * needs to be unmapped.
                         */
                        daddr = le64_to_cpu(txd[ent].buffer);
-                       dlen  =  CAS_VAL(TX_DESC_BUFLEN, 
+                       dlen  =  CAS_VAL(TX_DESC_BUFLEN,
                                         le64_to_cpu(txd[ent].control));
                        pci_unmap_page(cp->pdev, daddr, dlen,
                                       PCI_DMA_TODEVICE);
@@ -4020,7 +4002,7 @@ static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
 
        size = RX_DESC_RINGN_SIZE(ring);
        for (i = 0; i < size; i++) {
-               if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) 
+               if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
                        return -1;
        }
        return 0;
@@ -4039,9 +4021,9 @@ static int cas_alloc_rxds(struct cas *cp)
        return 0;
 }
 
-static void cas_reset_task(void *data)
+static void cas_reset_task(struct work_struct *work)
 {
-       struct cas *cp = (struct cas *) data;
+       struct cas *cp = container_of(work, struct cas, reset_task);
 #if 0
        int pending = atomic_read(&cp->reset_task_pending);
 #else
@@ -4087,7 +4069,7 @@ static void cas_reset_task(void *data)
                 * call to cas_init_hw will restart auto negotiation.
                 * Setting the second argument of cas_reset to
                 * !(pending == CAS_RESET_ALL) will set this argument
-                * to 1 (avoiding reinitializing the PHY for the normal 
+                * to 1 (avoiding reinitializing the PHY for the normal
                 * PCS case) when auto negotiation is not restarted.
                 */
 #if 1
@@ -4124,9 +4106,9 @@ static void cas_link_timer(unsigned long data)
 
        if (link_transition_timeout != 0 &&
            cp->link_transition_jiffies_valid &&
-           ((jiffies - cp->link_transition_jiffies) > 
+           ((jiffies - cp->link_transition_jiffies) >
              (link_transition_timeout))) {
-               /* One-second counter so link-down workaround doesn't 
+               /* One-second counter so link-down workaround doesn't
                 * cause resets to occur so fast as to fool the switch
                 * into thinking the link is down.
                 */
@@ -4146,10 +4128,10 @@ static void cas_link_timer(unsigned long data)
 #if 1
        if (atomic_read(&cp->reset_task_pending_all) ||
            atomic_read(&cp->reset_task_pending_spare) ||
-           atomic_read(&cp->reset_task_pending_mtu)) 
+           atomic_read(&cp->reset_task_pending_mtu))
                goto done;
 #else
-       if (atomic_read(&cp->reset_task_pending)) 
+       if (atomic_read(&cp->reset_task_pending))
                goto done;
 #endif
 
@@ -4199,10 +4181,8 @@ static void cas_link_timer(unsigned long data)
 
                if (((tlm == 0x5) || (tlm == 0x3)) &&
                    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
-                       if (netif_msg_tx_err(cp))
-                               printk(KERN_DEBUG "%s: tx err: "
-                                      "MAC_STATE[%08x]\n",
-                                      cp->dev->name, val);
+                       netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+                                    "tx err: MAC_STATE[%08x]\n", val);
                        reset = 1;
                        goto done;
                }
@@ -4211,10 +4191,9 @@ static void cas_link_timer(unsigned long data)
                wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
                rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
                if ((val == 0) && (wptr != rptr)) {
-                       if (netif_msg_tx_err(cp))
-                               printk(KERN_DEBUG "%s: tx err: "
-                                      "TX_FIFO[%08x:%08x:%08x]\n",
-                                      cp->dev->name, val, wptr, rptr);
+                       netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+                                    "tx err: TX_FIFO[%08x:%08x:%08x]\n",
+                                    val, wptr, rptr);
                        reset = 1;
                }
 
@@ -4230,7 +4209,7 @@ done:
                schedule_work(&cp->reset_task);
 #else
                atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
-               printk(KERN_ERR "reset called in cas_link_timer\n");
+               pr_err("reset called in cas_link_timer\n");
                schedule_work(&cp->reset_task);
 #endif
        }
@@ -4241,7 +4220,7 @@ done:
        spin_unlock_irqrestore(&cp->lock, flags);
 }
 
-/* tiny buffers are used to avoid target abort issues with 
+/* tiny buffers are used to avoid target abort issues with
  * older cassini's
  */
 static void cas_tx_tiny_free(struct cas *cp)
@@ -4253,7 +4232,7 @@ static void cas_tx_tiny_free(struct cas *cp)
                if (!cp->tx_tiny_bufs[i])
                        continue;
 
-               pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, 
+               pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
                                    cp->tx_tiny_bufs[i],
                                    cp->tx_tiny_dvma[i]);
                cp->tx_tiny_bufs[i] = NULL;
@@ -4266,7 +4245,7 @@ static int cas_tx_tiny_alloc(struct cas *cp)
        int i;
 
        for (i = 0; i < N_TX_RINGS; i++) {
-               cp->tx_tiny_bufs[i] = 
+               cp->tx_tiny_bufs[i] =
                        pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
                                             &cp->tx_tiny_dvma[i]);
                if (!cp->tx_tiny_bufs[i]) {
@@ -4284,18 +4263,18 @@ static int cas_open(struct net_device *dev)
        int hw_was_up, err;
        unsigned long flags;
 
-       down(&cp->pm_sem);
+       mutex_lock(&cp->pm_mutex);
 
        hw_was_up = cp->hw_running;
 
-       /* The power-management semaphore protects the hw_running
+       /* The power-management mutex protects the hw_running
         * etc. state so it is safe to do this bit without cp->lock
         */
        if (!cp->hw_running) {
                /* Reset the chip */
                cas_lock_all_save(cp, flags);
                /* We set the second arg to cas_reset to zero
-                * because cas_init_hw below will have its second 
+                * because cas_init_hw below will have its second
                 * argument set to non-zero, which will force
                 * autonegotiation to start.
                 */
@@ -4304,31 +4283,33 @@ static int cas_open(struct net_device *dev)
                cas_unlock_all_restore(cp, flags);
        }
 
+       err = -ENOMEM;
        if (cas_tx_tiny_alloc(cp) < 0)
-               return -ENOMEM;
+               goto err_unlock;
 
        /* alloc rx descriptors */
-       err = -ENOMEM;
        if (cas_alloc_rxds(cp) < 0)
                goto err_tx_tiny;
-       
+
        /* allocate spares */
        cas_spare_init(cp);
        cas_spare_recover(cp, GFP_KERNEL);
 
        /* We can now request the interrupt as we know it's masked
         * on the controller. cassini+ has up to 4 interrupts
-        * that can be used, but you need to do explicit pci interrupt 
+        * that can be used, but you need to do explicit pci interrupt
         * mapping to expose them
         */
        if (request_irq(cp->pdev->irq, cas_interrupt,
-                       SA_SHIRQ, dev->name, (void *) dev)) {
-               printk(KERN_ERR "%s: failed to request irq !\n", 
-                      cp->dev->name);
+                       IRQF_SHARED, dev->name, (void *) dev)) {
+               netdev_err(cp->dev, "failed to request irq !\n");
                err = -EAGAIN;
                goto err_spare;
        }
 
+#ifdef USE_NAPI
+       napi_enable(&cp->napi);
+#endif
        /* init hw */
        cas_lock_all_save(cp, flags);
        cas_clean_rings(cp);
@@ -4337,7 +4318,7 @@ static int cas_open(struct net_device *dev)
        cas_unlock_all_restore(cp, flags);
 
        netif_start_queue(dev);
-       up(&cp->pm_sem);
+       mutex_unlock(&cp->pm_mutex);
        return 0;
 
 err_spare:
@@ -4345,7 +4326,8 @@ err_spare:
        cas_free_rxds(cp);
 err_tx_tiny:
        cas_tx_tiny_free(cp);
-       up(&cp->pm_sem);
+err_unlock:
+       mutex_unlock(&cp->pm_mutex);
        return err;
 }
 
@@ -4354,16 +4336,19 @@ static int cas_close(struct net_device *dev)
        unsigned long flags;
        struct cas *cp = netdev_priv(dev);
 
+#ifdef USE_NAPI
+       napi_disable(&cp->napi);
+#endif
        /* Make sure we don't get distracted by suspend/resume */
-       down(&cp->pm_sem);
+       mutex_lock(&cp->pm_mutex);
 
        netif_stop_queue(dev);
 
        /* Stop traffic, mark us closed */
        cas_lock_all_save(cp, flags);
-       cp->opened = 0; 
+       cp->opened = 0;
        cas_reset(cp, 0);
-       cas_phy_init(cp); 
+       cas_phy_init(cp);
        cas_begin_auto_negotiation(cp, NULL);
        cas_clean_rings(cp);
        cas_unlock_all_restore(cp, flags);
@@ -4372,7 +4357,7 @@ static int cas_close(struct net_device *dev)
        cas_spare_free(cp);
        cas_free_rxds(cp);
        cas_tx_tiny_free(cp);
-       up(&cp->pm_sem);
+       mutex_unlock(&cp->pm_mutex);
        return 0;
 }
 
@@ -4396,7 +4381,7 @@ static struct {
        {"tx_fifo_errors"},
        {"tx_packets"}
 };
-#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN)
+#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
 
 static struct {
        const int offsets;      /* neg. values for 2nd arg to cas_read_phy */
@@ -4420,21 +4405,17 @@ static struct {
        {REG_MAC_COLL_EXCESS},
        {REG_MAC_COLL_LATE}
 };
-#define CAS_REG_LEN    (sizeof(ethtool_register_table)/sizeof(int))
+#define CAS_REG_LEN    ARRAY_SIZE(ethtool_register_table)
 #define CAS_MAX_REGS   (sizeof (u32)*CAS_REG_LEN)
 
-static u8 *cas_get_regs(struct cas *cp)
+static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
 {
-       u8 *ptr = kmalloc(CAS_MAX_REGS, GFP_KERNEL);
        u8 *p;
        int i;
        unsigned long flags;
 
-       if (!ptr)
-               return NULL;
-
        spin_lock_irqsave(&cp->lock, flags);
-       for (i = 0, p = ptr; i < CAS_REG_LEN ; i ++, p += sizeof(u32)) {
+       for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
                u16 hval;
                u32 val;
                if (ethtool_register_table[i].offsets < 0) {
@@ -4447,8 +4428,6 @@ static u8 *cas_get_regs(struct cas *cp)
                memcpy(p, (u8 *)&val, sizeof(u32));
        }
        spin_unlock_irqrestore(&cp->lock, flags);
-
-       return ptr;
 }
 
 static struct net_device_stats *cas_get_stats(struct net_device *dev)
@@ -4462,7 +4441,7 @@ static struct net_device_stats *cas_get_stats(struct net_device *dev)
        /* we collate all of the stats into net_stats[N_TX_RING] */
        if (!cp->hw_running)
                return stats + N_TX_RINGS;
-       
+
        /* collect outstanding stats */
        /* WTZ: the Cassini spec gives these as 16 bit counters but
         * stored in 32-bit words.  Added a mask of 0xffff to be safe,
@@ -4472,11 +4451,11 @@ static struct net_device_stats *cas_get_stats(struct net_device *dev)
         * that consistent.
         */
        spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
-       stats[N_TX_RINGS].rx_crc_errors += 
+       stats[N_TX_RINGS].rx_crc_errors +=
          readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
-       stats[N_TX_RINGS].rx_frame_errors += 
+       stats[N_TX_RINGS].rx_frame_errors +=
                readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
-       stats[N_TX_RINGS].rx_length_errors += 
+       stats[N_TX_RINGS].rx_length_errors +=
                readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
 #if 1
        tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
@@ -4485,7 +4464,7 @@ static struct net_device_stats *cas_get_stats(struct net_device *dev)
        stats[N_TX_RINGS].collisions +=
          tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
 #else
-       stats[N_TX_RINGS].tx_aborted_errors += 
+       stats[N_TX_RINGS].tx_aborted_errors +=
                readl(cp->regs + REG_MAC_COLL_EXCESS);
        stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
                readl(cp->regs + REG_MAC_COLL_LATE);
@@ -4504,7 +4483,7 @@ static struct net_device_stats *cas_get_stats(struct net_device *dev)
 
        for (i = 0; i < N_TX_RINGS; i++) {
                spin_lock(&cp->stat_lock[i]);
-               stats[N_TX_RINGS].rx_length_errors += 
+               stats[N_TX_RINGS].rx_length_errors +=
                        stats[i].rx_length_errors;
                stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
                stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
@@ -4529,10 +4508,10 @@ static void cas_set_multicast(struct net_device *dev)
        u32 rxcfg, rxcfg_new;
        unsigned long flags;
        int limit = STOP_TRIES;
-       
+
        if (!cp->hw_running)
                return;
-               
+
        spin_lock_irqsave(&cp->lock, flags);
        rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
 
@@ -4561,332 +4540,268 @@ static void cas_set_multicast(struct net_device *dev)
        spin_unlock_irqrestore(&cp->lock, flags);
 }
 
-/* Eventually add support for changing the advertisement
- * on autoneg.
- */
-static int cas_ethtool_ioctl(struct net_device *dev, void __user *ep_user)
+static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+       struct cas *cp = netdev_priv(dev);
+       strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
+       strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
+       info->fw_version[0] = '\0';
+       strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+       info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
+               cp->casreg_len : CAS_MAX_REGS;
+       info->n_stats = CAS_NUM_STAT_KEYS;
+}
+
+static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct cas *cp = netdev_priv(dev);
        u16 bmcr;
        int full_duplex, speed, pause;
-       struct ethtool_cmd ecmd;
        unsigned long flags;
        enum link_state linkstate = link_up;
 
-       if (copy_from_user(&ecmd, ep_user, sizeof(ecmd)))
-               return -EFAULT;
-               
-       switch(ecmd.cmd) {
-        case ETHTOOL_GDRVINFO: {
-               struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
-
-               strncpy(info.driver, DRV_MODULE_NAME,
-                       ETHTOOL_BUSINFO_LEN);
-               strncpy(info.version, DRV_MODULE_VERSION,
-                       ETHTOOL_BUSINFO_LEN);
-               info.fw_version[0] = '\0';
-               strncpy(info.bus_info, pci_name(cp->pdev),
-                       ETHTOOL_BUSINFO_LEN);
-               info.regdump_len = cp->casreg_len < CAS_MAX_REGS ?
-                       cp->casreg_len : CAS_MAX_REGS;
-               info.n_stats = CAS_NUM_STAT_KEYS;
-               if (copy_to_user(ep_user, &info, sizeof(info)))
-                       return -EFAULT;
-
-               return 0;
+       cmd->advertising = 0;
+       cmd->supported = SUPPORTED_Autoneg;
+       if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+               cmd->supported |= SUPPORTED_1000baseT_Full;
+               cmd->advertising |= ADVERTISED_1000baseT_Full;
        }
 
-       case ETHTOOL_GSET:
-               ecmd.advertising = 0;
-               ecmd.supported = SUPPORTED_Autoneg;
-               if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
-                       ecmd.supported |= SUPPORTED_1000baseT_Full;
-                       ecmd.advertising |= ADVERTISED_1000baseT_Full;
+       /* Record PHY settings if HW is on. */
+       spin_lock_irqsave(&cp->lock, flags);
+       bmcr = 0;
+       linkstate = cp->lstate;
+       if (CAS_PHY_MII(cp->phy_type)) {
+               cmd->port = PORT_MII;
+               cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
+                       XCVR_INTERNAL : XCVR_EXTERNAL;
+               cmd->phy_address = cp->phy_addr;
+               cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
+                       ADVERTISED_10baseT_Half |
+                       ADVERTISED_10baseT_Full |
+                       ADVERTISED_100baseT_Half |
+                       ADVERTISED_100baseT_Full;
+
+               cmd->supported |=
+                       (SUPPORTED_10baseT_Half |
+                        SUPPORTED_10baseT_Full |
+                        SUPPORTED_100baseT_Half |
+                        SUPPORTED_100baseT_Full |
+                        SUPPORTED_TP | SUPPORTED_MII);
+
+               if (cp->hw_running) {
+                       cas_mif_poll(cp, 0);
+                       bmcr = cas_phy_read(cp, MII_BMCR);
+                       cas_read_mii_link_mode(cp, &full_duplex,
+                                              &speed, &pause);
+                       cas_mif_poll(cp, 1);
                }
 
-               /* Record PHY settings if HW is on. */
-               spin_lock_irqsave(&cp->lock, flags);
-               bmcr = 0;
-               linkstate = cp->lstate;
-               if (CAS_PHY_MII(cp->phy_type)) {
-                       ecmd.port = PORT_MII;
-                       ecmd.transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
-                               XCVR_INTERNAL : XCVR_EXTERNAL;
-                       ecmd.phy_address = cp->phy_addr;
-                       ecmd.advertising |= ADVERTISED_TP | ADVERTISED_MII |
-                               ADVERTISED_10baseT_Half | 
-                               ADVERTISED_10baseT_Full | 
-                               ADVERTISED_100baseT_Half | 
-                               ADVERTISED_100baseT_Full;
-
-                       ecmd.supported |=
-                               (SUPPORTED_10baseT_Half | 
-                                SUPPORTED_10baseT_Full |
-                                SUPPORTED_100baseT_Half | 
-                                SUPPORTED_100baseT_Full |
-                                SUPPORTED_TP | SUPPORTED_MII);
-
-                       if (cp->hw_running) {
-                               cas_mif_poll(cp, 0);
-                               bmcr = cas_phy_read(cp, MII_BMCR);
-                               cas_read_mii_link_mode(cp, &full_duplex, 
-                                                      &speed, &pause);
-                               cas_mif_poll(cp, 1);
-                       }
-
-               } else {
-                       ecmd.port = PORT_FIBRE;
-                       ecmd.transceiver = XCVR_INTERNAL;
-                       ecmd.phy_address = 0;
-                       ecmd.supported   |= SUPPORTED_FIBRE;
-                       ecmd.advertising |= ADVERTISED_FIBRE;
-
-                       if (cp->hw_running) {
-                               /* pcs uses the same bits as mii */ 
-                               bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
-                               cas_read_pcs_link_mode(cp, &full_duplex, 
-                                                      &speed, &pause);
-                       }
+       } else {
+               cmd->port = PORT_FIBRE;
+               cmd->transceiver = XCVR_INTERNAL;
+               cmd->phy_address = 0;
+               cmd->supported   |= SUPPORTED_FIBRE;
+               cmd->advertising |= ADVERTISED_FIBRE;
+
+               if (cp->hw_running) {
+                       /* pcs uses the same bits as mii */
+                       bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
+                       cas_read_pcs_link_mode(cp, &full_duplex,
+                                              &speed, &pause);
                }
-               spin_unlock_irqrestore(&cp->lock, flags);
+       }
+       spin_unlock_irqrestore(&cp->lock, flags);
 
-               if (bmcr & BMCR_ANENABLE) {
-                       ecmd.advertising |= ADVERTISED_Autoneg;
-                       ecmd.autoneg = AUTONEG_ENABLE;
-                       ecmd.speed = ((speed == 10) ?
-                                     SPEED_10 :
-                                     ((speed == 1000) ?
-                                      SPEED_1000 : SPEED_100));
-                       ecmd.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+       if (bmcr & BMCR_ANENABLE) {
+               cmd->advertising |= ADVERTISED_Autoneg;
+               cmd->autoneg = AUTONEG_ENABLE;
+               cmd->speed = ((speed == 10) ?
+                             SPEED_10 :
+                             ((speed == 1000) ?
+                              SPEED_1000 : SPEED_100));
+               cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+       } else {
+               cmd->autoneg = AUTONEG_DISABLE;
+               cmd->speed =
+                       (bmcr & CAS_BMCR_SPEED1000) ?
+                       SPEED_1000 :
+                       ((bmcr & BMCR_SPEED100) ? SPEED_100:
+                        SPEED_10);
+               cmd->duplex =
+                       (bmcr & BMCR_FULLDPLX) ?
+                       DUPLEX_FULL : DUPLEX_HALF;
+       }
+       if (linkstate != link_up) {
+               /* Force these to "unknown" if the link is not up and
+                * autonegotiation is enabled. We can set the link
+                * speed to 0, but not cmd->duplex,
+                * because its legal values are 0 and 1.  Ethtool will
+                * print the value reported in parentheses after the
+                * word "Unknown" for unrecognized values.
+                *
+                * If in forced mode, we report the speed and duplex
+                * settings that we configured.
+                */
+               if (cp->link_cntl & BMCR_ANENABLE) {
+                       cmd->speed = 0;
+                       cmd->duplex = 0xff;
                } else {
-                       ecmd.autoneg = AUTONEG_DISABLE;
-                       ecmd.speed =
-                               (bmcr & CAS_BMCR_SPEED1000) ?
-                               SPEED_1000 : 
-                               ((bmcr & BMCR_SPEED100) ? SPEED_100: 
-                                SPEED_10);
-                       ecmd.duplex =
-                               (bmcr & BMCR_FULLDPLX) ?
-                               DUPLEX_FULL : DUPLEX_HALF;
-               }
-               if (linkstate != link_up) {
-                       /* Force these to "unknown" if the link is not up and
-                        * autonogotiation in enabled. We can set the link 
-                        * speed to 0, but not ecmd.duplex,
-                        * because its legal values are 0 and 1.  Ethtool will
-                        * print the value reported in parentheses after the
-                        * word "Unknown" for unrecognized values.
-                        *
-                        * If in forced mode, we report the speed and duplex
-                        * settings that we configured.
-                        */
-                       if (cp->link_cntl & BMCR_ANENABLE) {
-                               ecmd.speed = 0;
-                               ecmd.duplex = 0xff;
-                       } else {
-                               ecmd.speed = SPEED_10;
-                               if (cp->link_cntl & BMCR_SPEED100) {
-                                       ecmd.speed = SPEED_100;
-                               } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
-                                       ecmd.speed = SPEED_1000;
-                               }
-                               ecmd.duplex = (cp->link_cntl & BMCR_FULLDPLX)?
-                                       DUPLEX_FULL : DUPLEX_HALF;
+                       cmd->speed = SPEED_10;
+                       if (cp->link_cntl & BMCR_SPEED100) {
+                               cmd->speed = SPEED_100;
+                       } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
+                               cmd->speed = SPEED_1000;
                        }
+                       cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
+                               DUPLEX_FULL : DUPLEX_HALF;
                }
-               if (copy_to_user(ep_user, &ecmd, sizeof(ecmd)))
-                       return -EFAULT;
-               return 0;
+       }
+       return 0;
+}
 
-       case ETHTOOL_SSET:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
+static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct cas *cp = netdev_priv(dev);
+       unsigned long flags;
 
-               /* Verify the settings we care about. */
-               if (ecmd.autoneg != AUTONEG_ENABLE &&
-                   ecmd.autoneg != AUTONEG_DISABLE)
-                       return -EINVAL;
+       /* Verify the settings we care about. */
+       if (cmd->autoneg != AUTONEG_ENABLE &&
+           cmd->autoneg != AUTONEG_DISABLE)
+               return -EINVAL;
 
-               if (ecmd.autoneg == AUTONEG_DISABLE &&
-                   ((ecmd.speed != SPEED_1000 &&
-                     ecmd.speed != SPEED_100 &&
-                     ecmd.speed != SPEED_10) ||
-                    (ecmd.duplex != DUPLEX_HALF &&
-                     ecmd.duplex != DUPLEX_FULL)))
-                       return -EINVAL;
+       if (cmd->autoneg == AUTONEG_DISABLE &&
+           ((cmd->speed != SPEED_1000 &&
+             cmd->speed != SPEED_100 &&
+             cmd->speed != SPEED_10) ||
+            (cmd->duplex != DUPLEX_HALF &&
+             cmd->duplex != DUPLEX_FULL)))
+               return -EINVAL;
 
-               /* Apply settings and restart link process. */
-               spin_lock_irqsave(&cp->lock, flags);
-               cas_begin_auto_negotiation(cp, &ecmd);
-               spin_unlock_irqrestore(&cp->lock, flags);
-               return 0;
+       /* Apply settings and restart link process. */
+       spin_lock_irqsave(&cp->lock, flags);
+       cas_begin_auto_negotiation(cp, cmd);
+       spin_unlock_irqrestore(&cp->lock, flags);
+       return 0;
+}
 
-       case ETHTOOL_NWAY_RST:
-               if ((cp->link_cntl & BMCR_ANENABLE) == 0)
-                       return -EINVAL;
+static int cas_nway_reset(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+       unsigned long flags;
 
-               /* Restart link process. */
-               spin_lock_irqsave(&cp->lock, flags);
-               cas_begin_auto_negotiation(cp, NULL);
-               spin_unlock_irqrestore(&cp->lock, flags);
+       if ((cp->link_cntl & BMCR_ANENABLE) == 0)
+               return -EINVAL;
 
-               return 0;
+       /* Restart link process. */
+       spin_lock_irqsave(&cp->lock, flags);
+       cas_begin_auto_negotiation(cp, NULL);
+       spin_unlock_irqrestore(&cp->lock, flags);
 
-       case ETHTOOL_GWOL:
-       case ETHTOOL_SWOL:
-               break; /* doesn't exist */
+       return 0;
+}
 
-       /* get link status */
-       case ETHTOOL_GLINK: {
-               struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
+static u32 cas_get_link(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+       return cp->lstate == link_up;
+}
 
-               edata.data = (cp->lstate == link_up);
-               if (copy_to_user(ep_user, &edata, sizeof(edata)))
-                       return -EFAULT;
-               return 0;
-       }
+static u32 cas_get_msglevel(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+       return cp->msg_enable;
+}
 
-       /* get message-level */
-       case ETHTOOL_GMSGLVL: {
-               struct ethtool_value edata = { .cmd = ETHTOOL_GMSGLVL };
+static void cas_set_msglevel(struct net_device *dev, u32 value)
+{
+       struct cas *cp = netdev_priv(dev);
+       cp->msg_enable = value;
+}
 
-               edata.data = cp->msg_enable;
-               if (copy_to_user(ep_user, &edata, sizeof(edata)))
-                       return -EFAULT;
-               return 0;
-       }
+static int cas_get_regs_len(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+       return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
+}
 
-       /* set message-level */
-       case ETHTOOL_SMSGLVL: {
-               struct ethtool_value edata;
+static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+                            void *p)
+{
+       struct cas *cp = netdev_priv(dev);
+       regs->version = 0;
+       /* cas_read_regs handles locks (cp->lock).  */
+       cas_read_regs(cp, p, regs->len / sizeof(u32));
+}
 
-               if (!capable(CAP_NET_ADMIN)) {
-                       return (-EPERM);
-               }
-               if (copy_from_user(&edata, ep_user, sizeof(edata)))
-                       return -EFAULT;
-               cp->msg_enable = edata.data;
-               return 0;
+static int cas_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return CAS_NUM_STAT_KEYS;
+       default:
+               return -EOPNOTSUPP;
        }
+}
 
-       case ETHTOOL_GREGS: {
-               struct ethtool_regs edata;
-               u8 *ptr;
-               int len = cp->casreg_len < CAS_MAX_REGS ?
-                       cp->casreg_len: CAS_MAX_REGS;
-
-               if (copy_from_user(&edata, ep_user, sizeof (edata)))
-                       return -EFAULT;
-
-               if (edata.len > len)
-                       edata.len = len;
-               edata.version = 0;
-               if (copy_to_user (ep_user, &edata, sizeof(edata)))
-                       return -EFAULT;
-
-               /* cas_get_regs handles locks (cp->lock).  */
-               ptr = cas_get_regs(cp);
-               if (ptr == NULL)
-                       return -ENOMEM;
-               if (copy_to_user(ep_user + sizeof (edata), ptr, edata.len))
-                       return -EFAULT;
-
-               kfree(ptr);
-               return (0);
-       }
-       case ETHTOOL_GSTRINGS: {
-               struct ethtool_gstrings edata;
-               int len;
-
-               if (copy_from_user(&edata, ep_user, sizeof(edata)))
-                       return -EFAULT;
-
-               len = edata.len;
-               switch(edata.string_set) {
-               case ETH_SS_STATS:
-                       edata.len = (len < CAS_NUM_STAT_KEYS) ?
-                               len : CAS_NUM_STAT_KEYS;
-                       if (copy_to_user(ep_user, &edata, sizeof(edata)))
-                               return -EFAULT;
-
-                       if (copy_to_user(ep_user + sizeof(edata),
-                                        &ethtool_cassini_statnames, 
-                                        (edata.len * ETH_GSTRING_LEN)))
-                               return -EFAULT;
-                       return 0;
-               default:
-                       return -EINVAL;
-               }
-       }
-       case ETHTOOL_GSTATS: {
-               int i = 0;
-               u64 *tmp;
-               struct ethtool_stats edata;
-               struct net_device_stats *stats;
-               int len;
-
-               if (copy_from_user(&edata, ep_user, sizeof(edata)))
-                       return -EFAULT;
-
-               len = edata.n_stats;
-               stats = cas_get_stats(cp->dev);
-               edata.cmd = ETHTOOL_GSTATS;
-               edata.n_stats = (len < CAS_NUM_STAT_KEYS) ?
-                       len : CAS_NUM_STAT_KEYS;
-               if (copy_to_user(ep_user, &edata, sizeof (edata)))
-                       return -EFAULT;
-
-               tmp = kmalloc(sizeof(u64)*CAS_NUM_STAT_KEYS, GFP_KERNEL);
-               if (tmp) {
-                       tmp[i++] = stats->collisions;
-                       tmp[i++] = stats->rx_bytes;
-                       tmp[i++] = stats->rx_crc_errors;
-                       tmp[i++] = stats->rx_dropped;
-                       tmp[i++] = stats->rx_errors;
-                       tmp[i++] = stats->rx_fifo_errors;
-                       tmp[i++] = stats->rx_frame_errors;
-                       tmp[i++] = stats->rx_length_errors;
-                       tmp[i++] = stats->rx_over_errors;
-                       tmp[i++] = stats->rx_packets;
-                       tmp[i++] = stats->tx_aborted_errors;
-                       tmp[i++] = stats->tx_bytes;
-                       tmp[i++] = stats->tx_dropped;
-                       tmp[i++] = stats->tx_errors;
-                       tmp[i++] = stats->tx_fifo_errors;
-                       tmp[i++] = stats->tx_packets;
-                       BUG_ON(i != CAS_NUM_STAT_KEYS);
-
-                       i = copy_to_user(ep_user + sizeof(edata),
-                                        tmp, sizeof(u64)*edata.n_stats);
-                       kfree(tmp);
-               } else {
-                       return -ENOMEM;
-               }
-               if (i)
-                       return -EFAULT;
-               return 0;
-       }
-       }
+static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+        memcpy(data, &ethtool_cassini_statnames,
+                                        CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
+}
 
-       return -EOPNOTSUPP;
+static void cas_get_ethtool_stats(struct net_device *dev,
+                                     struct ethtool_stats *estats, u64 *data)
+{
+       struct cas *cp = netdev_priv(dev);
+       struct net_device_stats *stats = cas_get_stats(cp->dev);
+       int i = 0;
+       data[i++] = stats->collisions;
+       data[i++] = stats->rx_bytes;
+       data[i++] = stats->rx_crc_errors;
+       data[i++] = stats->rx_dropped;
+       data[i++] = stats->rx_errors;
+       data[i++] = stats->rx_fifo_errors;
+       data[i++] = stats->rx_frame_errors;
+       data[i++] = stats->rx_length_errors;
+       data[i++] = stats->rx_over_errors;
+       data[i++] = stats->rx_packets;
+       data[i++] = stats->tx_aborted_errors;
+       data[i++] = stats->tx_bytes;
+       data[i++] = stats->tx_dropped;
+       data[i++] = stats->tx_errors;
+       data[i++] = stats->tx_fifo_errors;
+       data[i++] = stats->tx_packets;
+       BUG_ON(i != CAS_NUM_STAT_KEYS);
 }
 
+static const struct ethtool_ops cas_ethtool_ops = {
+       .get_drvinfo            = cas_get_drvinfo,
+       .get_settings           = cas_get_settings,
+       .set_settings           = cas_set_settings,
+       .nway_reset             = cas_nway_reset,
+       .get_link               = cas_get_link,
+       .get_msglevel           = cas_get_msglevel,
+       .set_msglevel           = cas_set_msglevel,
+       .get_regs_len           = cas_get_regs_len,
+       .get_regs               = cas_get_regs,
+       .get_sset_count         = cas_get_sset_count,
+       .get_strings            = cas_get_strings,
+       .get_ethtool_stats      = cas_get_ethtool_stats,
+};
+
 static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct cas *cp = netdev_priv(dev);
        struct mii_ioctl_data *data = if_mii(ifr);
        unsigned long flags;
        int rc = -EOPNOTSUPP;
-       
-       /* Hold the PM semaphore while doing ioctl's or we may collide
+
+       /* Hold the PM mutex while doing ioctl's or we may collide
         * with open/close and power management and oops.
         */
-       down(&cp->pm_sem);
+       mutex_lock(&cp->pm_mutex);
        switch (cmd) {
-       case SIOCETHTOOL:
-               rc = cas_ethtool_ioctl(dev, ifr->ifr_data);
-               break;
-
        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
                data->phy_id = cp->phy_addr;
                /* Fallthrough... */
@@ -4901,10 +4816,6 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                break;
 
        case SIOCSMIIREG:               /* Write MII PHY register. */
-               if (!capable(CAP_NET_ADMIN)) {
-                       rc = -EPERM;
-                       break;
-               }
                spin_lock_irqsave(&cp->lock, flags);
                cas_mif_poll(cp, 0);
                rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
@@ -4915,15 +4826,115 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                break;
        };
 
-       up(&cp->pm_sem);
+       mutex_unlock(&cp->pm_mutex);
        return rc;
 }
 
+/* When this chip sits underneath an Intel 31154 bridge, it is the
+ * only subordinate device and we can tweak the bridge settings to
+ * reflect that fact.
+ */
+static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
+{
+       struct pci_dev *pdev = cas_pdev->bus->self;
+       u32 val;
+
+       if (!pdev)
+               return;
+
+       if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
+               return;
+
+       /* Clear bit 10 (Bus Parking Control) in the Secondary
+        * Arbiter Control/Status Register which lives at offset
+        * 0x41.  Using a 32-bit word read/modify/write at 0x40
+        * is much simpler so that's how we do this.
+        */
+       pci_read_config_dword(pdev, 0x40, &val);
+       val &= ~0x00040000;
+       pci_write_config_dword(pdev, 0x40, val);
+
+       /* Max out the Multi-Transaction Timer settings since
+        * Cassini is the only device present.
+        *
+        * The register is 16-bit and lives at 0x50.  When the
+        * settings are enabled, it extends the GRANT# signal
+        * for a requestor after a transaction is complete.  This
+        * allows the next request to run without first needing
+        * to negotiate the GRANT# signal back.
+        *
+        * Bits 12:10 define the grant duration:
+        *
+        *      1       --      16 clocks
+        *      2       --      32 clocks
+        *      3       --      64 clocks
+        *      4       --      128 clocks
+        *      5       --      256 clocks
+        *
+        * All other values are illegal.
+        *
+        * Bits 09:00 define which REQ/GNT signal pairs get the
+        * GRANT# signal treatment.  We set them all.
+        */
+       pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
+
+       /* The Read Prefetch Policy register is 16-bit and sits at
+        * offset 0x52.  It enables a "smart" pre-fetch policy.  We
+        * enable it and max out all of the settings since only one
+        * device is sitting underneath and thus bandwidth sharing is
+        * not an issue.
+        *
+        * The register has several 3 bit fields, which indicates a
+        * multiplier applied to the base amount of prefetching the
+        * chip would do.  These fields are at:
+        *
+        *      15:13   ---     ReRead Primary Bus
+        *      12:10   ---     FirstRead Primary Bus
+        *      09:07   ---     ReRead Secondary Bus
+        *      06:04   ---     FirstRead Secondary Bus
+        *
+        * Bits 03:00 control which REQ/GNT pairs the prefetch settings
+        * get enabled on.  Bit 3 is a grouped enabler which controls
+        * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
+        * the individual REQ/GNT pairs [2:0].
+        */
+       pci_write_config_word(pdev, 0x52,
+                             (0x7 << 13) |
+                             (0x7 << 10) |
+                             (0x7 <<  7) |
+                             (0x7 <<  4) |
+                             (0xf <<  0));
+
+       /* Force cacheline size to 0x8 */
+       pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+
+       /* Force latency timer to maximum setting so Cassini can
+        * sit on the bus as long as it likes.
+        */
+       pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
+}
+
+static const struct net_device_ops cas_netdev_ops = {
+       .ndo_open               = cas_open,
+       .ndo_stop               = cas_close,
+       .ndo_start_xmit         = cas_start_xmit,
+       .ndo_get_stats          = cas_get_stats,
+       .ndo_set_multicast_list = cas_set_multicast,
+       .ndo_do_ioctl           = cas_ioctl,
+       .ndo_tx_timeout         = cas_tx_timeout,
+       .ndo_change_mtu         = cas_change_mtu,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = cas_netpoll,
+#endif
+};
+
 static int __devinit cas_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
 {
        static int cas_version_printed = 0;
-       unsigned long casreg_base, casreg_len;
+       unsigned long casreg_len;
        struct net_device *dev;
        struct cas *cp;
        int i, err, pci_using_dac;
@@ -4931,35 +4942,32 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
        u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
 
        if (cas_version_printed++ == 0)
-               printk(KERN_INFO "%s", version);
+               pr_info("%s", version);
 
        err = pci_enable_device(pdev);
        if (err) {
-               printk(KERN_ERR PFX "Cannot enable PCI device, "
-                      "aborting.\n");
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }
 
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-               printk(KERN_ERR PFX "Cannot find proper PCI device "
-                      "base address, aborting.\n");
+               dev_err(&pdev->dev, "Cannot find proper PCI device "
+                      "base address, aborting\n");
                err = -ENODEV;
                goto err_out_disable_pdev;
        }
 
        dev = alloc_etherdev(sizeof(*cp));
        if (!dev) {
-               printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+               dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
                err = -ENOMEM;
                goto err_out_disable_pdev;
        }
-       SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        err = pci_request_regions(pdev, dev->name);
        if (err) {
-               printk(KERN_ERR PFX "Cannot obtain PCI resources, "
-                      "aborting.\n");
+               dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
                goto err_out_free_netdev;
        }
        pci_set_master(pdev);
@@ -4972,10 +4980,14 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
        pci_cmd &= ~PCI_COMMAND_SERR;
        pci_cmd |= PCI_COMMAND_PARITY;
        pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
-       pci_set_mwi(pdev);
+       if (pci_try_set_mwi(pdev))
+               pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
+
+       cas_program_bridge(pdev);
+
        /*
         * On some architectures, the default cache line size set
-        * by pci_set_mwi reduces perforamnce.  We have to increase
+        * by pci_try_set_mwi reduces performance.  We have to increase
         * it for this case.  To start, we'll print some configuration
         * data.
         */
@@ -4983,13 +4995,13 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
        pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
                             &orig_cacheline_size);
        if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
-               cas_cacheline_size = 
-                       (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ? 
+               cas_cacheline_size =
+                       (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
                        CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
-               if (pci_write_config_byte(pdev, 
-                                         PCI_CACHE_LINE_SIZE, 
+               if (pci_write_config_byte(pdev,
+                                         PCI_CACHE_LINE_SIZE,
                                          cas_cacheline_size)) {
-                       printk(KERN_ERR PFX "Could not set PCI cache "
+                       dev_err(&pdev->dev, "Could not set PCI cache "
                               "line size\n");
                        goto err_write_cacheline;
                }
@@ -4998,27 +5010,26 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 
 
        /* Configure DMA attributes. */
-       if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev,
-                                                 DMA_64BIT_MASK);
+                                                 DMA_BIT_MASK(64));
                if (err < 0) {
-                       printk(KERN_ERR PFX "Unable to obtain 64-bit DMA "
+                       dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
                               "for consistent allocations\n");
                        goto err_out_free_res;
                }
 
        } else {
-               err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
-                       printk(KERN_ERR PFX "No usable DMA configuration, "
-                              "aborting.\n");
+                       dev_err(&pdev->dev, "No usable DMA configuration, "
+                              "aborting\n");
                        goto err_out_free_res;
                }
                pci_using_dac = 0;
        }
 
-       casreg_base = pci_resource_start(pdev, 0);
        casreg_len = pci_resource_len(pdev, 0);
 
        cp = netdev_priv(dev);
@@ -5028,7 +5039,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
        cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
 #endif
        cp->dev = dev;
-       cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : 
+       cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
          cassini_debug;
 
        cp->link_transition = LINK_TRANSITION_UNKNOWN;
@@ -5042,7 +5053,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
                spin_lock_init(&cp->tx_lock[i]);
        }
        spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
-       init_MUTEX(&cp->pm_sem);
+       mutex_init(&cp->pm_mutex);
 
        init_timer(&cp->link_timer);
        cp->link_timer.function = cas_link_timer;
@@ -5057,10 +5068,10 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
        atomic_set(&cp->reset_task_pending_spare, 0);
        atomic_set(&cp->reset_task_pending_mtu, 0);
 #endif
-       INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+       INIT_WORK(&cp->reset_task, cas_reset_task);
 
        /* Default link parameters */
-       if (link_mode >= 0 && link_mode <= 6)
+       if (link_mode >= 0 && link_mode < 6)
                cp->link_cntl = link_modes[link_mode];
        else
                cp->link_cntl = BMCR_ANENABLE;
@@ -5070,10 +5081,9 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
        cp->timer_ticks = 0;
 
        /* give us access to cassini registers */
-       cp->regs = ioremap(casreg_base, casreg_len);
-       if (cp->regs == 0UL) {
-               printk(KERN_ERR PFX "Cannot map device registers, "
-                      "aborting.\n");
+       cp->regs = pci_iomap(pdev, 0, casreg_len);
+       if (!cp->regs) {
+               dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
                goto err_out_free_res;
        }
        cp->casreg_len = casreg_len;
@@ -5084,43 +5094,36 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
        cas_reset(cp, 0);
        if (cas_check_invariants(cp))
                goto err_out_iounmap;
+       if (cp->cas_flags & CAS_FLAG_SATURN)
+               if (cas_saturn_firmware_init(cp))
+                       goto err_out_iounmap;
 
        cp->init_block = (struct cas_init_block *)
                pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
                                     &cp->block_dvma);
        if (!cp->init_block) {
-               printk(KERN_ERR PFX "Cannot allocate init block, "
-                      "aborting.\n");
+               dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
                goto err_out_iounmap;
        }
 
-       for (i = 0; i < N_TX_RINGS; i++) 
+       for (i = 0; i < N_TX_RINGS; i++)
                cp->init_txds[i] = cp->init_block->txds[i];
 
-       for (i = 0; i < N_RX_DESC_RINGS; i++) 
+       for (i = 0; i < N_RX_DESC_RINGS; i++)
                cp->init_rxds[i] = cp->init_block->rxds[i];
 
-       for (i = 0; i < N_RX_COMP_RINGS; i++) 
+       for (i = 0; i < N_RX_COMP_RINGS; i++)
                cp->init_rxcs[i] = cp->init_block->rxcs[i];
 
        for (i = 0; i < N_RX_FLOWS; i++)
                skb_queue_head_init(&cp->rx_flows[i]);
 
-       dev->open = cas_open;
-       dev->stop = cas_close;
-       dev->hard_start_xmit = cas_start_xmit;
-       dev->get_stats = cas_get_stats;
-       dev->set_multicast_list = cas_set_multicast;
-       dev->do_ioctl = cas_ioctl;
-       dev->tx_timeout = cas_tx_timeout;
+       dev->netdev_ops = &cas_netdev_ops;
+       dev->ethtool_ops = &cas_ethtool_ops;
        dev->watchdog_timeo = CAS_TX_TIMEOUT;
-       dev->change_mtu = cas_change_mtu;
+
 #ifdef USE_NAPI
-       dev->poll = cas_poll;
-       dev->weight = 64;
-#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       dev->poll_controller = cas_netpoll;
+       netif_napi_add(dev, &cp->napi, cas_poll, 64);
 #endif
        dev->irq = pdev->irq;
        dev->dma = 0;
@@ -5133,23 +5136,17 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
                dev->features |= NETIF_F_HIGHDMA;
 
        if (register_netdev(dev)) {
-               printk(KERN_ERR PFX "Cannot register net device, "
-                      "aborting.\n");
+               dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_free_consistent;
        }
 
        i = readl(cp->regs + REG_BIM_CFG);
-       printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
-              "Ethernet[%d] ",  dev->name, 
-              (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", 
-              (i & BIM_CFG_32BIT) ? "32" : "64",
-              (i & BIM_CFG_66MHZ) ? "66" : "33",
-              (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq); 
-
-       for (i = 0; i < 6; i++)
-               printk("%2.2x%c", dev->dev_addr[i],
-                      i == 5 ? ' ' : ':');
-       printk("\n");
+       netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
+                   (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+                   (i & BIM_CFG_32BIT) ? "32" : "64",
+                   (i & BIM_CFG_66MHZ) ? "66" : "33",
+                   (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
+                   dev->dev_addr);
 
        pci_set_drvdata(pdev, dev);
        cp->hw_running = 1;
@@ -5163,12 +5160,12 @@ err_out_free_consistent:
                            cp->init_block, cp->block_dvma);
 
 err_out_iounmap:
-       down(&cp->pm_sem);
+       mutex_lock(&cp->pm_mutex);
        if (cp->hw_running)
                cas_shutdown(cp);
-       up(&cp->pm_sem);
+       mutex_unlock(&cp->pm_mutex);
 
-       iounmap(cp->regs);
+       pci_iounmap(pdev, cp->regs);
 
 
 err_out_free_res:
@@ -5176,7 +5173,7 @@ err_out_free_res:
 
 err_write_cacheline:
        /* Try to restore it in case the error occured after we
-        * set it. 
+        * set it.
         */
        pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
 
@@ -5199,24 +5196,27 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
        cp = netdev_priv(dev);
        unregister_netdev(dev);
 
-       down(&cp->pm_sem);
+       if (cp->fw_data)
+               vfree(cp->fw_data);
+
+       mutex_lock(&cp->pm_mutex);
        flush_scheduled_work();
        if (cp->hw_running)
                cas_shutdown(cp);
-       up(&cp->pm_sem);
+       mutex_unlock(&cp->pm_mutex);
 
 #if 1
        if (cp->orig_cacheline_size) {
                /* Restore the cache line size if we had modified
                 * it.
                 */
-               pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 
+               pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
                                      cp->orig_cacheline_size);
        }
 #endif
        pci_free_consistent(pdev, sizeof(struct cas_init_block),
                            cp->init_block, cp->block_dvma);
-       iounmap(cp->regs);
+       pci_iounmap(pdev, cp->regs);
        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
@@ -5230,11 +5230,8 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
        struct cas *cp = netdev_priv(dev);
        unsigned long flags;
 
-       /* We hold the PM semaphore during entire driver
-        * sleep time
-        */
-       down(&cp->pm_sem);
-       
+       mutex_lock(&cp->pm_mutex);
+
        /* If the driver is opened, we stop the DMA */
        if (cp->opened) {
                netif_device_detach(dev);
@@ -5253,6 +5250,7 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
 
        if (cp->hw_running)
                cas_shutdown(cp);
+       mutex_unlock(&cp->pm_mutex);
 
        return 0;
 }
@@ -5262,8 +5260,9 @@ static int cas_resume(struct pci_dev *pdev)
        struct net_device *dev = pci_get_drvdata(pdev);
        struct cas *cp = netdev_priv(dev);
 
-       printk(KERN_INFO "%s: resuming\n", dev->name);
+       netdev_info(dev, "resuming\n");
 
+       mutex_lock(&cp->pm_mutex);
        cas_hard_reset(cp);
        if (cp->opened) {
                unsigned long flags;
@@ -5276,7 +5275,7 @@ static int cas_resume(struct pci_dev *pdev)
 
                netif_device_attach(dev);
        }
-       up(&cp->pm_sem);
+       mutex_unlock(&cp->pm_mutex);
        return 0;
 }
 #endif /* CONFIG_PM */
@@ -5299,7 +5298,7 @@ static int __init cas_init(void)
        else
                link_transition_timeout = 0;
 
-       return pci_module_init(&cas_driver);
+       return pci_register_driver(&cas_driver);
 }
 
 static void __exit cas_cleanup(void)