Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index f012cc6..24507f3 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
+  Copyright(c) 1999 - 2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -26,6 +26,8 @@
 
 *******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -36,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/tcp.h>
 #include <linux/ipv6.h>
+#include <linux/slab.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 #include <linux/mii.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
 #include <linux/aer.h>
 
 #include "e1000.h"
 
-#define DRV_VERSION "0.3.3.4-k4"
+#define DRV_VERSION "1.0.2-k4"
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -62,18 +66,363 @@ static const struct e1000_info *e1000_info_tbl[] = {
        [board_ich8lan]         = &e1000_ich8_info,
        [board_ich9lan]         = &e1000_ich9_info,
        [board_ich10lan]        = &e1000_ich10_info,
+       [board_pchlan]          = &e1000_pch_info,
 };
 
-#ifdef DEBUG
-/**
- * e1000_get_hw_dev_name - return device name string
- * used by hardware layer to print debugging information
- **/
-char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
+struct e1000_reg_info {
+       u32 ofs;
+       char *name;
+};
+
+#define E1000_RDFH     0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT     0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS    0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS    0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC    0x02430 /* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH     0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT     0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430 /* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+       /* General Registers */
+       {E1000_CTRL, "CTRL"},
+       {E1000_STATUS, "STATUS"},
+       {E1000_CTRL_EXT, "CTRL_EXT"},
+
+       /* Interrupt Registers */
+       {E1000_ICR, "ICR"},
+
+       /* RX Registers */
+       {E1000_RCTL, "RCTL"},
+       {E1000_RDLEN, "RDLEN"},
+       {E1000_RDH, "RDH"},
+       {E1000_RDT, "RDT"},
+       {E1000_RDTR, "RDTR"},
+       {E1000_RXDCTL(0), "RXDCTL"},
+       {E1000_ERT, "ERT"},
+       {E1000_RDBAL, "RDBAL"},
+       {E1000_RDBAH, "RDBAH"},
+       {E1000_RDFH, "RDFH"},
+       {E1000_RDFT, "RDFT"},
+       {E1000_RDFHS, "RDFHS"},
+       {E1000_RDFTS, "RDFTS"},
+       {E1000_RDFPC, "RDFPC"},
+
+       /* TX Registers */
+       {E1000_TCTL, "TCTL"},
+       {E1000_TDBAL, "TDBAL"},
+       {E1000_TDBAH, "TDBAH"},
+       {E1000_TDLEN, "TDLEN"},
+       {E1000_TDH, "TDH"},
+       {E1000_TDT, "TDT"},
+       {E1000_TIDV, "TIDV"},
+       {E1000_TXDCTL(0), "TXDCTL"},
+       {E1000_TADV, "TADV"},
+       {E1000_TARC(0), "TARC"},
+       {E1000_TDFH, "TDFH"},
+       {E1000_TDFT, "TDFT"},
+       {E1000_TDFHS, "TDFHS"},
+       {E1000_TDFTS, "TDFTS"},
+       {E1000_TDFPC, "TDFPC"},
+
+       /* List Terminator */
+       {}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
 {
-       return hw->adapter->netdev->name;
+       int n = 0;
+       char rname[16];
+       u32 regs[8];
+
+       switch (reginfo->ofs) {
+       case E1000_RXDCTL(0):
+               for (n = 0; n < 2; n++)
+                       regs[n] = __er32(hw, E1000_RXDCTL(n));
+               break;
+       case E1000_TXDCTL(0):
+               for (n = 0; n < 2; n++)
+                       regs[n] = __er32(hw, E1000_TXDCTL(n));
+               break;
+       case E1000_TARC(0):
+               for (n = 0; n < 2; n++)
+                       regs[n] = __er32(hw, E1000_TARC(n));
+               break;
+       default:
+               printk(KERN_INFO "%-15s %08x\n",
+                       reginfo->name, __er32(hw, reginfo->ofs));
+               return;
+       }
+
+       snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+       printk(KERN_INFO "%-15s ", rname);
+       for (n = 0; n < 2; n++)
+               printk(KERN_CONT "%08x ", regs[n]);
+       printk(KERN_CONT "\n");
+}
+
+
+/*
+ * e1000e_dump - Print registers, tx-ring and rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct e1000_hw *hw = &adapter->hw;
+       struct e1000_reg_info *reginfo;
+       struct e1000_ring *tx_ring = adapter->tx_ring;
+       struct e1000_tx_desc *tx_desc;
+       struct my_u0 { u64 a; u64 b; } *u0;
+       struct e1000_buffer *buffer_info;
+       struct e1000_ring *rx_ring = adapter->rx_ring;
+       union e1000_rx_desc_packet_split *rx_desc_ps;
+       struct e1000_rx_desc *rx_desc;
+       struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+       u32 staterr;
+       int i = 0;
+
+       if (!netif_msg_hw(adapter))
+               return;
+
+       /* Print netdevice Info */
+       if (netdev) {
+               dev_info(&adapter->pdev->dev, "Net device Info\n");
+               printk(KERN_INFO "Device Name     state            "
+                       "trans_start      last_rx\n");
+               printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+                       netdev->name,
+                       netdev->state,
+                       netdev->trans_start,
+                       netdev->last_rx);
+       }
+
+       /* Print Registers */
+       dev_info(&adapter->pdev->dev, "Register Dump\n");
+       printk(KERN_INFO " Register Name   Value\n");
+       for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+            reginfo->name; reginfo++) {
+               e1000_regdump(hw, reginfo);
+       }
+
+       /* Print TX Ring Summary */
+       if (!netdev || !netif_running(netdev))
+               goto exit;
+
+       dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+       printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
+               " leng ntw timestamp\n");
+       buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+       printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+               0, tx_ring->next_to_use, tx_ring->next_to_clean,
+               (u64)buffer_info->dma,
+               buffer_info->length,
+               buffer_info->next_to_watch,
+               (u64)buffer_info->time_stamp);
+
+       /* Print TX Rings */
+       if (!netif_msg_tx_done(adapter))
+               goto rx_ring_summary;
+
+       dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+       /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+        *
+        * Legacy Transmit Descriptor
+        *   +--------------------------------------------------------------+
+        * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
+        *   +--------------------------------------------------------------+
+        * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
+        *   +--------------------------------------------------------------+
+        *   63       48 47        36 35    32 31     24 23    16 15        0
+        *
+        * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+        *   63      48 47    40 39       32 31             16 15    8 7      0
+        *   +----------------------------------------------------------------+
+        * 0 |  TUCSE  | TUCSO  |   TUCSS   |     IPCSE       | IPCSO | IPCSS |
+        *   +----------------------------------------------------------------+
+        * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
+        *   +----------------------------------------------------------------+
+        *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
+        *
+        * Extended Data Descriptor (DTYP=0x1)
+        *   +----------------------------------------------------------------+
+        * 0 |                     Buffer Address [63:0]                      |
+        *   +----------------------------------------------------------------+
+        * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
+        *   +----------------------------------------------------------------+
+        *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
+        */
+       printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
+               " [bi->dma       ] leng  ntw timestamp        bi->skb "
+               "<-- Legacy format\n");
+       printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+               " [bi->dma       ] leng  ntw timestamp        bi->skb "
+               "<-- Ext Context format\n");
+       printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
+               " [bi->dma       ] leng  ntw timestamp        bi->skb "
+               "<-- Ext Data format\n");
+       for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+               tx_desc = E1000_TX_DESC(*tx_ring, i);
+               buffer_info = &tx_ring->buffer_info[i];
+               u0 = (struct my_u0 *)tx_desc;
+               printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
+                       "%04X  %3X %016llX %p",
+                      (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
+                       ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+                      le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+                      (u64)buffer_info->dma, buffer_info->length,
+                      buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
+                      buffer_info->skb);
+               if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+                       printk(KERN_CONT " NTC/U\n");
+               else if (i == tx_ring->next_to_use)
+                       printk(KERN_CONT " NTU\n");
+               else if (i == tx_ring->next_to_clean)
+                       printk(KERN_CONT " NTC\n");
+               else
+                       printk(KERN_CONT "\n");
+
+               if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+                       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+                                       16, 1, phys_to_virt(buffer_info->dma),
+                                       buffer_info->length, true);
+       }
+
+       /* Print RX Rings Summary */
+rx_ring_summary:
+       dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+       printk(KERN_INFO "Queue [NTU] [NTC]\n");
+       printk(KERN_INFO " %5d %5X %5X\n", 0,
+               rx_ring->next_to_use, rx_ring->next_to_clean);
+
+       /* Print RX Rings */
+       if (!netif_msg_rx_status(adapter))
+               goto exit;
+
+       dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+       switch (adapter->rx_ps_pages) {
+       case 1:
+       case 2:
+       case 3:
+               /* [Extended] Packet Split Receive Descriptor Format
+                *
+                *    +-----------------------------------------------------+
+                *  0 |                Buffer Address 0 [63:0]              |
+                *    +-----------------------------------------------------+
+                *  8 |                Buffer Address 1 [63:0]              |
+                *    +-----------------------------------------------------+
+                * 16 |                Buffer Address 2 [63:0]              |
+                *    +-----------------------------------------------------+
+                * 24 |                Buffer Address 3 [63:0]              |
+                *    +-----------------------------------------------------+
+                */
+               printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
+                       "[buffer 1 63:0 ] "
+                      "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
+                      "[bi->skb] <-- Ext Pkt Split format\n");
+               /* [Extended] Receive Descriptor (Write-Back) Format
+                *
+                *   63       48 47    32 31     13 12    8 7    4 3        0
+                *   +------------------------------------------------------+
+                * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
+                *   | Checksum | Ident  |         | Queue |      |  Type   |
+                *   +------------------------------------------------------+
+                * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+                *   +------------------------------------------------------+
+                *   63       48 47    32 31            20 19               0
+                */
+               printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
+                       "[vl   l0 ee  es] "
+                      "[ l3  l2  l1 hs] [reserved      ] ---------------- "
+                      "[bi->skb] <-- Ext Rx Write-Back format\n");
+               for (i = 0; i < rx_ring->count; i++) {
+                       buffer_info = &rx_ring->buffer_info[i];
+                       rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+                       u1 = (struct my_u1 *)rx_desc_ps;
+                       staterr =
+                               le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+                       if (staterr & E1000_RXD_STAT_DD) {
+                               /* Descriptor Done */
+                               printk(KERN_INFO "RWB[0x%03X]     %016llX "
+                                       "%016llX %016llX %016llX "
+                                       "---------------- %p", i,
+                                       le64_to_cpu(u1->a),
+                                       le64_to_cpu(u1->b),
+                                       le64_to_cpu(u1->c),
+                                       le64_to_cpu(u1->d),
+                                       buffer_info->skb);
+                       } else {
+                               printk(KERN_INFO "R  [0x%03X]     %016llX "
+                                       "%016llX %016llX %016llX %016llX %p", i,
+                                       le64_to_cpu(u1->a),
+                                       le64_to_cpu(u1->b),
+                                       le64_to_cpu(u1->c),
+                                       le64_to_cpu(u1->d),
+                                       (u64)buffer_info->dma,
+                                       buffer_info->skb);
+
+                               if (netif_msg_pktdata(adapter))
+                                       print_hex_dump(KERN_INFO, "",
+                                               DUMP_PREFIX_ADDRESS, 16, 1,
+                                               phys_to_virt(buffer_info->dma),
+                                               adapter->rx_ps_bsize0, true);
+                       }
+
+                       if (i == rx_ring->next_to_use)
+                               printk(KERN_CONT " NTU\n");
+                       else if (i == rx_ring->next_to_clean)
+                               printk(KERN_CONT " NTC\n");
+                       else
+                               printk(KERN_CONT "\n");
+               }
+               break;
+       default:
+       case 0:
+               /* Legacy Receive Descriptor Format
+                *
+                * +-----------------------------------------------------+
+                * |                Buffer Address [63:0]                |
+                * +-----------------------------------------------------+
+                * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
+                * +-----------------------------------------------------+
+                * 63       48 47    40 39      32 31         16 15      0
+                */
+               printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
+                       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
+                       "<-- Legacy format\n");
+               for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
+                       rx_desc = E1000_RX_DESC(*rx_ring, i);
+                       buffer_info = &rx_ring->buffer_info[i];
+                       u0 = (struct my_u0 *)rx_desc;
+                       printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
+                               "%016llX %p",
+                               i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+                               (u64)buffer_info->dma, buffer_info->skb);
+                       if (i == rx_ring->next_to_use)
+                               printk(KERN_CONT " NTU\n");
+                       else if (i == rx_ring->next_to_clean)
+                               printk(KERN_CONT " NTC\n");
+                       else
+                               printk(KERN_CONT "\n");
+
+                       if (netif_msg_pktdata(adapter))
+                               print_hex_dump(KERN_INFO, "",
+                                       DUMP_PREFIX_ADDRESS,
+                                       16, 1, phys_to_virt(buffer_info->dma),
+                                       adapter->rx_buffer_len, true);
+               }
+       }
+
+exit:
+       return;
 }
-#endif
 
 /**
  * e1000_desc_unused - calculate if we have unused descriptors
@@ -166,7 +515,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
-       unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+       unsigned int bufsz = adapter->rx_buffer_len;
 
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
@@ -178,26 +527,19 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                        goto map_skb;
                }
 
-               skb = netdev_alloc_skb(netdev, bufsz);
+               skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                if (!skb) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        break;
                }
 
-               /*
-                * Make buffer alignment 2 beyond a 16 byte boundary
-                * this will result in a 16 byte aligned IP header after
-                * the 14 byte MAC header is removed
-                */
-               skb_reserve(skb, NET_IP_ALIGN);
-
                buffer_info->skb = skb;
 map_skb:
-               buffer_info->dma = pci_map_single(pdev, skb->data,
+               buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                                  adapter->rx_buffer_len,
-                                                 PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+                                                 DMA_FROM_DEVICE);
+               if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        break;
@@ -206,26 +548,23 @@ map_skb:
                rx_desc = E1000_RX_DESC(*rx_ring, i);
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
+               if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+                       /*
+                        * Force memory writes to complete before letting h/w
+                        * know there are new descriptors to fetch.  (Only
+                        * applicable for weak-ordered memory model archs,
+                        * such as IA-64).
+                        */
+                       wmb();
+                       writel(i, adapter->hw.hw_addr + rx_ring->tail);
+               }
                i++;
                if (i == rx_ring->count)
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }
 
-       if (rx_ring->next_to_use != i) {
-               rx_ring->next_to_use = i;
-               if (i-- == 0)
-                       i = (rx_ring->count - 1);
-
-               /*
-                * Force memory writes to complete before letting h/w
-                * know there are new descriptors to fetch.  (Only
-                * applicable for weak-ordered memory model archs,
-                * such as IA-64).
-                */
-               wmb();
-               writel(i, adapter->hw.hw_addr + rx_ring->tail);
-       }
+       rx_ring->next_to_use = i;
 }
 
 /**
@@ -263,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                        adapter->alloc_rx_buff_failed++;
                                        goto no_buffers;
                                }
-                               ps_page->dma = pci_map_page(pdev,
-                                                  ps_page->page,
-                                                  0, PAGE_SIZE,
-                                                  PCI_DMA_FROMDEVICE);
-                               if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+                               ps_page->dma = dma_map_page(&pdev->dev,
+                                                           ps_page->page,
+                                                           0, PAGE_SIZE,
+                                                           DMA_FROM_DEVICE);
+                               if (dma_mapping_error(&pdev->dev,
+                                                     ps_page->dma)) {
                                        dev_err(&adapter->pdev->dev,
                                          "RX DMA page map failed\n");
                                        adapter->rx_dma_failed++;
@@ -283,26 +623,19 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                             cpu_to_le64(ps_page->dma);
                }
 
-               skb = netdev_alloc_skb(netdev,
-                                      adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(netdev,
+                                               adapter->rx_ps_bsize0);
 
                if (!skb) {
                        adapter->alloc_rx_buff_failed++;
                        break;
                }
 
-               /*
-                * Make buffer alignment 2 beyond a 16 byte boundary
-                * this will result in a 16 byte aligned IP header after
-                * the 14 byte MAC header is removed
-                */
-               skb_reserve(skb, NET_IP_ALIGN);
-
                buffer_info->skb = skb;
-               buffer_info->dma = pci_map_single(pdev, skb->data,
+               buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                                  adapter->rx_ps_bsize0,
-                                                 PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+                                                 DMA_FROM_DEVICE);
+               if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        /* cleanup skb */
@@ -313,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
                rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
+               if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+                       /*
+                        * Force memory writes to complete before letting h/w
+                        * know there are new descriptors to fetch.  (Only
+                        * applicable for weak-ordered memory model archs,
+                        * such as IA-64).
+                        */
+                       wmb();
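+                       /*
+                        * Packet split descriptors are 32 bytes while the
+                        * hardware increments the tail in 16-byte units, so
+                        * advance the tail twice as far (i << 1).
+                        */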
+                       writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+               }
+
                i++;
                if (i == rx_ring->count)
                        i = 0;
@@ -320,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
        }
 
 no_buffers:
-       if (rx_ring->next_to_use != i) {
-               rx_ring->next_to_use = i;
-
-               if (!(i--))
-                       i = (rx_ring->count - 1);
-
-               /*
-                * Force memory writes to complete before letting h/w
-                * know there are new descriptors to fetch.  (Only
-                * applicable for weak-ordered memory model archs,
-                * such as IA-64).
-                */
-               wmb();
-               /*
-                * Hardware increments by 16 bytes, but packet split
-                * descriptors are 32 bytes...so we increment tail
-                * twice as much.
-                */
-               writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
-       }
+       rx_ring->next_to_use = i;
 }
 
 /**
@@ -358,9 +683,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
-       unsigned int bufsz = 256 -
-                            16 /* for skb_reserve */ -
-                            NET_IP_ALIGN;
+       unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
@@ -372,19 +695,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
                        goto check_page;
                }
 
-               skb = netdev_alloc_skb(netdev, bufsz);
+               skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        break;
                }
 
-               /* Make buffer alignment 2 beyond a 16 byte boundary
-                * this will result in a 16 byte aligned IP header after
-                * the 14 byte MAC header is removed
-                */
-               skb_reserve(skb, NET_IP_ALIGN);
-
                buffer_info->skb = skb;
 check_page:
                /* allocate a new page if necessary */
@@ -397,10 +714,10 @@ check_page:
                }
 
                if (!buffer_info->dma)
-                       buffer_info->dma = pci_map_page(pdev,
+                       buffer_info->dma = dma_map_page(&pdev->dev,
                                                        buffer_info->page, 0,
                                                        PAGE_SIZE,
-                                                       PCI_DMA_FROMDEVICE);
+                                                       DMA_FROM_DEVICE);
 
                rx_desc = E1000_RX_DESC(*rx_ring, i);
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -436,6 +753,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 {
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
+       struct e1000_hw *hw = &adapter->hw;
        struct e1000_ring *rx_ring = adapter->rx_ring;
        struct e1000_rx_desc *rx_desc, *next_rxd;
        struct e1000_buffer *buffer_info, *next_buffer;
@@ -473,22 +791,31 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
                cleaned = 1;
                cleaned_count++;
-               pci_unmap_single(pdev,
+               dma_unmap_single(&pdev->dev,
                                 buffer_info->dma,
                                 adapter->rx_buffer_len,
-                                PCI_DMA_FROMDEVICE);
+                                DMA_FROM_DEVICE);
                buffer_info->dma = 0;
 
                length = le16_to_cpu(rx_desc->length);
 
-               /* !EOP means multiple descriptors were used to store a single
-                * packet, also make sure the frame isn't just CRC only */
-               if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+               /*
+                * !EOP means multiple descriptors were used to store a single
+                * packet; if that's the case we need to toss it.  In fact, we
+                * need to toss every packet with the EOP bit clear and the
+                * next frame that _does_ have the EOP bit set, as it is by
+                * definition only a frame fragment.
+                */
+               if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+                       adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+               if (adapter->flags2 & FLAG2_IS_DISCARDING) {
                        /* All receives must fit into a single buffer */
-                       e_dbg("%s: Receive packet consumed multiple buffers\n",
-                             netdev->name);
+                       e_dbg("Receive packet consumed multiple buffers\n");
                        /* recycle */
                        buffer_info->skb = skb;
+                       if (status & E1000_RXD_STAT_EOP)
+                               adapter->flags2 &= ~FLAG2_IS_DISCARDING;
                        goto next_desc;
                }
 
@@ -512,9 +839,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                 */
                if (length < copybreak) {
                        struct sk_buff *new_skb =
-                           netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+                           netdev_alloc_skb_ip_align(netdev, length);
                        if (new_skb) {
-                               skb_reserve(new_skb, NET_IP_ALIGN);
                                skb_copy_to_linear_data_offset(new_skb,
                                                               -NET_IP_ALIGN,
                                                               (skb->data -
@@ -559,33 +885,51 @@ next_desc:
 
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
-       adapter->net_stats.rx_bytes += total_rx_bytes;
-       adapter->net_stats.rx_packets += total_rx_packets;
+       netdev->stats.rx_bytes += total_rx_bytes;
+       netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
 static void e1000_put_txbuf(struct e1000_adapter *adapter,
                             struct e1000_buffer *buffer_info)
 {
-       buffer_info->dma = 0;
+       if (buffer_info->dma) {
+               if (buffer_info->mapped_as_page)
+                       dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+                                      buffer_info->length, DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+                                        buffer_info->length, DMA_TO_DEVICE);
+               buffer_info->dma = 0;
+       }
        if (buffer_info->skb) {
-               skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
-                             DMA_TO_DEVICE);
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
 }
 
-static void e1000_print_tx_hang(struct e1000_adapter *adapter)
+static void e1000_print_hw_hang(struct work_struct *work)
 {
+       struct e1000_adapter *adapter = container_of(work,
+                                                    struct e1000_adapter,
+                                                    print_hang_task);
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int i = tx_ring->next_to_clean;
        unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
        struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
+       struct e1000_hw *hw = &adapter->hw;
+       u16 phy_status, phy_1000t_status, phy_ext_status;
+       u16 pci_status;
 
-       /* detected Tx unit hang */
-       e_err("Detected Tx Unit Hang:\n"
+       e1e_rphy(hw, PHY_STATUS, &phy_status);
+       e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
+       e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
+
+       pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
+
+       /* detected Hardware unit hang */
+       e_err("Detected Hardware Unit Hang:\n"
              "  TDH                  <%x>\n"
              "  TDT                  <%x>\n"
              "  next_to_use          <%x>\n"
@@ -594,7 +938,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
              "  time_stamp           <%lx>\n"
              "  next_to_watch        <%x>\n"
              "  jiffies              <%lx>\n"
-             "  next_to_watch.status <%x>\n",
+             "  next_to_watch.status <%x>\n"
+             "MAC Status             <%x>\n"
+             "PHY Status             <%x>\n"
+             "PHY 1000BASE-T Status  <%x>\n"
+             "PHY Extended Status    <%x>\n"
+             "PCI Status             <%x>\n",
              readl(adapter->hw.hw_addr + tx_ring->head),
              readl(adapter->hw.hw_addr + tx_ring->tail),
              tx_ring->next_to_use,
@@ -602,7 +951,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
              tx_ring->buffer_info[eop].time_stamp,
              eop,
              jiffies,
-             eop_desc->upper.fields.status);
+             eop_desc->upper.fields.status,
+             er32(STATUS),
+             phy_status,
+             phy_1000t_status,
+             phy_ext_status,
+             pci_status);
 }
 
 /**
@@ -636,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                        cleaned = (i == eop);
 
                        if (cleaned) {
-                               struct sk_buff *skb = buffer_info->skb;
-                               unsigned int segs, bytecount;
-                               segs = skb_shinfo(skb)->gso_segs ?: 1;
-                               /* multiply data chunks by size of headers */
-                               bytecount = ((segs - 1) * skb_headlen(skb)) +
-                                           skb->len;
-                               total_tx_packets += segs;
-                               total_tx_bytes += bytecount;
+                               total_tx_packets += buffer_info->segs;
+                               total_tx_bytes += buffer_info->bytecount;
                        }
 
                        e1000_put_txbuf(adapter, buffer_info);
@@ -654,6 +1002,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                                i = 0;
                }
 
+               /* stop once we reach descriptors not yet handed to hardware */
+               if (i == tx_ring->next_to_use)
+                       break;
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }
@@ -676,21 +1026,23 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
        }
 
        if (adapter->detect_tx_hung) {
-               /* Detect a transmit hang in hardware, this serializes the
-                * check with the clearing of time_stamp and movement of i */
+               /*
+                * Detect a transmit hang in hardware, this serializes the
+                * check with the clearing of time_stamp and movement of i
+                */
                adapter->detect_tx_hung = 0;
                if (tx_ring->buffer_info[i].time_stamp &&
                    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
-                              + (adapter->tx_timeout_factor * HZ))
-                   && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
-                       e1000_print_tx_hang(adapter);
+                              + (adapter->tx_timeout_factor * HZ)) &&
+                   !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+                       schedule_work(&adapter->print_hang_task);
                        netif_stop_queue(netdev);
                }
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
-       adapter->net_stats.tx_bytes += total_tx_bytes;
-       adapter->net_stats.tx_packets += total_tx_packets;
+       netdev->stats.tx_bytes += total_tx_bytes;
+       netdev->stats.tx_packets += total_tx_packets;
        return (count < tx_ring->count);
 }
 
@@ -704,6 +1056,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                  int *work_done, int work_to_do)
 {
+       struct e1000_hw *hw = &adapter->hw;
        union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
@@ -741,15 +1094,21 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 
                cleaned = 1;
                cleaned_count++;
-               pci_unmap_single(pdev, buffer_info->dma,
+               dma_unmap_single(&pdev->dev, buffer_info->dma,
                                 adapter->rx_ps_bsize0,
-                                PCI_DMA_FROMDEVICE);
+                                DMA_FROM_DEVICE);
                buffer_info->dma = 0;
 
-               if (!(staterr & E1000_RXD_STAT_EOP)) {
-                       e_dbg("%s: Packet Split buffers didn't pick up the "
-                             "full packet\n", netdev->name);
+               /* see !EOP comment in other rx routine */
+               if (!(staterr & E1000_RXD_STAT_EOP))
+                       adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+               if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+                       e_dbg("Packet Split buffers didn't pick up the full "
+                             "packet\n");
                        dev_kfree_skb_irq(skb);
+                       if (staterr & E1000_RXD_STAT_EOP)
+                               adapter->flags2 &= ~FLAG2_IS_DISCARDING;
                        goto next_desc;
                }
 
@@ -761,8 +1120,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                length = le16_to_cpu(rx_desc->wb.middle.length0);
 
                if (!length) {
-                       e_dbg("%s: Last part of the packet spanning multiple "
-                             "descriptors\n", netdev->name);
+                       e_dbg("Last part of the packet spanning multiple "
+                             "descriptors\n");
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
@@ -793,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                         * kmap_atomic, so we can't hold the mapping
                         * very long
                         */
-                       pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
-                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+                                               PAGE_SIZE, DMA_FROM_DEVICE);
                        vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
                        memcpy(skb_tail_pointer(skb), vaddr, l1);
                        kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-                       pci_dma_sync_single_for_device(pdev, ps_page->dma,
-                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+                                                  PAGE_SIZE, DMA_FROM_DEVICE);
 
                        /* remove the CRC */
                        if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -816,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                break;
 
                        ps_page = &buffer_info->ps_pages[j];
-                       pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
-                                      PCI_DMA_FROMDEVICE);
+                       dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+                                      DMA_FROM_DEVICE);
                        ps_page->dma = 0;
                        skb_fill_page_desc(skb, j, ps_page->page, 0, length);
                        ps_page->page = NULL;
@@ -870,8 +1229,8 @@ next_desc:
 
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
-       adapter->net_stats.rx_bytes += total_rx_bytes;
-       adapter->net_stats.rx_packets += total_rx_packets;
+       netdev->stats.rx_bytes += total_rx_bytes;
+       netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -935,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 
                cleaned = true;
                cleaned_count++;
-               pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
-                              PCI_DMA_FROMDEVICE);
+               dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+                              DMA_FROM_DEVICE);
                buffer_info->dma = 0;
 
                length = le16_to_cpu(rx_desc->length);
@@ -1050,8 +1409,8 @@ next_desc:
 
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
-       adapter->net_stats.rx_bytes += total_rx_bytes;
-       adapter->net_stats.rx_packets += total_rx_packets;
+       netdev->stats.rx_bytes += total_rx_bytes;
+       netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -1072,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
                        if (adapter->clean_rx == e1000_clean_rx_irq)
-                               pci_unmap_single(pdev, buffer_info->dma,
+                               dma_unmap_single(&pdev->dev, buffer_info->dma,
                                                 adapter->rx_buffer_len,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                        else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
-                               pci_unmap_page(pdev, buffer_info->dma,
+                               dma_unmap_page(&pdev->dev, buffer_info->dma,
                                               PAGE_SIZE,
-                                              PCI_DMA_FROMDEVICE);
+                                              DMA_FROM_DEVICE);
                        else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
-                               pci_unmap_single(pdev, buffer_info->dma,
+                               dma_unmap_single(&pdev->dev, buffer_info->dma,
                                                 adapter->rx_ps_bsize0,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                }
 
@@ -1100,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
                        ps_page = &buffer_info->ps_pages[j];
                        if (!ps_page->page)
                                break;
-                       pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
-                                      PCI_DMA_FROMDEVICE);
+                       dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+                                      DMA_FROM_DEVICE);
                        ps_page->dma = 0;
                        put_page(ps_page->page);
                        ps_page->page = NULL;
@@ -1119,6 +1478,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
+       adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 
        writel(0, adapter->hw.hw_addr + rx_ring->head);
        writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -1198,7 +1558,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
        struct e1000_hw *hw = &adapter->hw;
        u32 rctl, icr = er32(ICR);
 
-       if (!icr)
+       if (!icr || test_bit(__E1000_DOWN, &adapter->state))
                return IRQ_NONE;  /* Not our interrupt */
 
        /*
@@ -1407,8 +1767,6 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~FLAG_MSI_ENABLED;
        }
-
-       return;
 }
 
 /**
@@ -1460,8 +1818,6 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                /* Don't do anything; this is the system default */
                break;
        }
-
-       return;
 }
 
 /**
@@ -1480,7 +1836,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
        else
                memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
        err = request_irq(adapter->msix_entries[vector].vector,
-                         &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
+                         e1000_intr_msix_rx, 0, adapter->rx_ring->name,
                          netdev);
        if (err)
                goto out;
@@ -1493,7 +1849,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
        else
                memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
        err = request_irq(adapter->msix_entries[vector].vector,
-                         &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
+                         e1000_intr_msix_tx, 0, adapter->tx_ring->name,
                          netdev);
        if (err)
                goto out;
@@ -1502,7 +1858,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
        vector++;
 
        err = request_irq(adapter->msix_entries[vector].vector,
-                         &e1000_msix_other, 0, netdev->name, netdev);
+                         e1000_msix_other, 0, netdev->name, netdev);
        if (err)
                goto out;
 
@@ -1533,7 +1889,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
                e1000e_set_interrupt_capability(adapter);
        }
        if (adapter->flags & FLAG_MSI_ENABLED) {
-               err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
+               err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
                                  netdev->name, netdev);
                if (!err)
                        return err;
@@ -1543,7 +1899,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
                adapter->int_mode = E1000E_INT_MODE_LEGACY;
        }
 
-       err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
+       err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
                          netdev->name, netdev);
        if (err)
                e_err("Unable to allocate interrupt, Error: %d\n", err);
@@ -1996,7 +2352,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *poll_dev = adapter->netdev;
-       int tx_cleaned = 0, work_done = 0;
+       int tx_cleaned = 1, work_done = 0;
 
        adapter = netdev_priv(poll_dev);
 
@@ -2039,11 +2395,14 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
            (vid == adapter->mng_vlan_id))
                return;
+
        /* add VID to filter table */
-       index = (vid >> 5) & 0x7F;
-       vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-       vfta |= (1 << (vid & 0x1F));
-       e1000e_write_vfta(hw, index, vfta);
+       if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
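+               /*
+                * Each 32-bit VFTA register covers 32 consecutive VIDs, e.g.
+                * VID 129 maps to register index 4 (129 >> 5), bit 1
+                * (129 & 0x1F).
+                */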
+               index = (vid >> 5) & 0x7F;
+               vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+               vfta |= (1 << (vid & 0x1F));
+               hw->mac.ops.write_vfta(hw, index, vfta);
+       }
 }
 
 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -2068,10 +2427,12 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        }
 
        /* remove VID from filter table */
-       index = (vid >> 5) & 0x7F;
-       vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-       vfta &= ~(1 << (vid & 0x1F));
-       e1000e_write_vfta(hw, index, vfta);
+       if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+               index = (vid >> 5) & 0x7F;
+               vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+               vfta &= ~(1 << (vid & 0x1F));
+               hw->mac.ops.write_vfta(hw, index, vfta);
+       }
 }
 
 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
@@ -2161,10 +2522,10 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
        }
 }
 
-static void e1000_init_manageability(struct e1000_adapter *adapter)
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u32 manc, manc2h;
+       u32 manc, manc2h, mdef, i, j;
 
        if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
                return;
@@ -2178,10 +2539,49 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
         */
        manc |= E1000_MANC_EN_MNG2HOST;
        manc2h = er32(MANC2H);
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
-       manc2h |= E1000_MNG2HOST_PORT_623;
-       manc2h |= E1000_MNG2HOST_PORT_664;
+
+       switch (hw->mac.type) {
+       default:
+               manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+               break;
+       case e1000_82574:
+       case e1000_82583:
+               /*
+                * Check if IPMI pass-through decision filter already exists;
+                * if so, enable it.
+                */
+               for (i = 0, j = 0; i < 8; i++) {
+                       mdef = er32(MDEF(i));
+
+                       /* Ignore filters with anything other than IPMI ports */
+                       if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+                               continue;
+
+                       /* Enable this decision filter in MANC2H */
+                       if (mdef)
+                               manc2h |= (1 << i);
+
+                       j |= mdef;
+               }
+
+               if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+                       break;
+
+               /* Create new decision filter in an empty filter */
+               for (i = 0, j = 0; i < 8; i++)
+                       if (er32(MDEF(i)) == 0) {
+                               ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+                                              E1000_MDEF_PORT_664));
+                               manc2h |= (1 << i);
+                               j++;
+                               break;
+                       }
+
+               if (!j)
+                       e_warn("Unable to create IPMI pass-through filter\n");
+               break;
+       }
+
        ew32(MANC2H, manc2h);
        ew32(MANC, manc);
 }
@@ -2255,8 +2655,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
                ew32(TARC(1), tarc);
        }
 
-       e1000e_config_collision_dist(hw);
-
        /* Setup Transmit Descriptor Settings for eop descriptor */
        adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
 
@@ -2269,7 +2667,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 
        ew32(TCTL, tctl);
 
-       adapter->tx_queue_len = adapter->netdev->tx_queue_len;
+       e1000e_config_collision_dist(hw);
 }
 
 /**
@@ -2308,22 +2706,27 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
        if (adapter->flags2 & FLAG2_CRC_STRIPPING)
                rctl |= E1000_RCTL_SECRC;
 
+       /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
+       if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
+               u16 phy_data;
+
+               e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
+               phy_data &= 0xfff8;
+               phy_data |= (1 << 2);
+               e1e_wphy(hw, PHY_REG(770, 26), phy_data);
+
+               e1e_rphy(hw, 22, &phy_data);
+               phy_data &= 0x0fff;
+               phy_data |= (1 << 14);
+               e1e_wphy(hw, 0x10, 0x2823);
+               e1e_wphy(hw, 0x11, 0x0003);
+               e1e_wphy(hw, 22, phy_data);
+       }
+
        /* Setup buffer sizes */
        rctl &= ~E1000_RCTL_SZ_4096;
        rctl |= E1000_RCTL_BSEX;
        switch (adapter->rx_buffer_len) {
-       case 256:
-               rctl |= E1000_RCTL_SZ_256;
-               rctl &= ~E1000_RCTL_BSEX;
-               break;
-       case 512:
-               rctl |= E1000_RCTL_SZ_512;
-               rctl &= ~E1000_RCTL_BSEX;
-               break;
-       case 1024:
-               rctl |= E1000_RCTL_SZ_1024;
-               rctl &= ~E1000_RCTL_BSEX;
-               break;
        case 2048:
        default:
                rctl |= E1000_RCTL_SZ_2048;
@@ -2446,8 +2849,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
                ew32(ITR, 1000000000 / (adapter->itr * 256));
 
        ctrl_ext = er32(CTRL_EXT);
-       /* Reset delay timers after every interrupt */
-       ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
        /* Auto-Mask interrupts upon ICR access */
        ctrl_ext |= E1000_CTRL_EXT_IAME;
        ew32(IAM, 0xffffffff);
@@ -2489,21 +2890,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
         * packet size is equal or larger than the specified value (in 8 byte
         * units), e.g. using jumbo frames when setting to E1000_ERT_2048
         */
-       if ((adapter->flags & FLAG_HAS_ERT) &&
-           (adapter->netdev->mtu > ETH_DATA_LEN)) {
-               u32 rxdctl = er32(RXDCTL(0));
-               ew32(RXDCTL(0), rxdctl | 0x3);
-               ew32(ERT, E1000_ERT_2048 | (1 << 13));
-               /*
-                * With jumbo frames and early-receive enabled, excessive
-                * C4->C2 latencies result in dropped transactions.
-                */
-               pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-                                         e1000e_driver_name, 55);
-       } else {
-               pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-                                         e1000e_driver_name,
-                                         PM_QOS_DEFAULT_VALUE);
+       if (adapter->flags & FLAG_HAS_ERT) {
+               if (adapter->netdev->mtu > ETH_DATA_LEN) {
+                       u32 rxdctl = er32(RXDCTL(0));
+                       ew32(RXDCTL(0), rxdctl | 0x3);
+                       ew32(ERT, E1000_ERT_2048 | (1 << 13));
+                       /*
+                        * With jumbo frames and early-receive enabled,
+                        * excessive C-state transition latencies result in
+                        * dropped transactions.
+                        */
+                       pm_qos_update_request(
+                               adapter->netdev->pm_qos_req, 55);
+               } else {
+                       pm_qos_update_request(
+                               adapter->netdev->pm_qos_req,
+                               PM_QOS_DEFAULT_VALUE);
+               }
        }
 
        /* Enable Receives */
@@ -2515,22 +2918,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
  *  @hw: pointer to the HW structure
  *  @mc_addr_list: array of multicast addresses to program
  *  @mc_addr_count: number of multicast addresses to program
- *  @rar_used_count: the first RAR register free to program
- *  @rar_count: total number of supported Receive Address Registers
  *
- *  Updates the Receive Address Registers and Multicast Table Array.
+ *  Updates the Multicast Table Array.
  *  The caller must have a packed mc_addr_list of multicast addresses.
- *  The parameter rar_count will usually be hw->mac.rar_entry_count
- *  unless there are workarounds that change this.  Currently no func pointer
- *  exists and all implementations are handled in the generic version of this
- *  function.
  **/
 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
-                                     u32 mc_addr_count, u32 rar_used_count,
-                                     u32 rar_count)
+                                     u32 mc_addr_count)
 {
-       hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
-                                       rar_used_count, rar_count);
+       hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
 }
 
 /**
@@ -2546,8 +2941,7 @@ static void e1000_set_multi(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       struct e1000_mac_info *mac = &hw->mac;
-       struct dev_mc_list *mc_ptr;
+       struct netdev_hw_addr *ha;
        u8  *mta_list;
        u32 rctl;
        int i;
@@ -2572,31 +2966,24 @@ static void e1000_set_multi(struct net_device *netdev)
 
        ew32(RCTL, rctl);
 
-       if (netdev->mc_count) {
-               mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+       if (!netdev_mc_empty(netdev)) {
+               mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
                if (!mta_list)
                        return;
 
                /* prepare a packed array of only addresses. */
-               mc_ptr = netdev->mc_list;
-
-               for (i = 0; i < netdev->mc_count; i++) {
-                       if (!mc_ptr)
-                               break;
-                       memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
-                              ETH_ALEN);
-                       mc_ptr = mc_ptr->next;
-               }
+               i = 0;
+               netdev_for_each_mc_addr(ha, netdev)
+                       memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
-               e1000_update_mc_addr_list(hw, mta_list, i, 1,
-                                         mac->rar_entry_count);
+               e1000_update_mc_addr_list(hw, mta_list, i);
                kfree(mta_list);
        } else {
                /*
                 * if we're called from probe, we might not have
                 * anything to do here, so clear out the list
                 */
-               e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
+               e1000_update_mc_addr_list(hw, NULL, 0);
        }
 }
 
@@ -2609,7 +2996,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
        e1000_set_multi(adapter->netdev);
 
        e1000_restore_vlan(adapter);
-       e1000_init_manageability(adapter);
+       e1000_init_manageability_pt(adapter);
 
        e1000_configure_tx(adapter);
        e1000_setup_rctl(adapter);
@@ -2627,18 +3014,8 @@ static void e1000_configure(struct e1000_adapter *adapter)
  **/
 void e1000e_power_up_phy(struct e1000_adapter *adapter)
 {
-       u16 mii_reg = 0;
-
-       /* Just clear the power down bit to wake the phy back up */
-       if (adapter->hw.phy.media_type == e1000_media_type_copper) {
-               /*
-                * According to the manual, the phy will retain its
-                * settings across a power-down/up cycle
-                */
-               e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
-               mii_reg &= ~MII_CR_POWER_DOWN;
-               e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
-       }
+       if (adapter->hw.phy.ops.power_up)
+               adapter->hw.phy.ops.power_up(&adapter->hw);
 
        adapter->hw.mac.ops.setup_link(&adapter->hw);
 }
@@ -2646,35 +3023,17 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
 /**
  * e1000_power_down_phy - Power down the PHY
  *
- * Power down the PHY so no link is implied when interface is down
- * The PHY cannot be powered down is management or WoL is active
+ * Power down the PHY so no link is implied when interface is down.
+ * The PHY cannot be powered down if management or WoL is active.
  */
 static void e1000_power_down_phy(struct e1000_adapter *adapter)
 {
-       struct e1000_hw *hw = &adapter->hw;
-       u16 mii_reg;
-
        /* WoL is enabled */
        if (adapter->wol)
                return;
 
-       /* non-copper PHY? */
-       if (adapter->hw.phy.media_type != e1000_media_type_copper)
-               return;
-
-       /* reset is blocked because of a SoL/IDER session */
-       if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw))
-               return;
-
-       /* manageability (AMT) is enabled */
-       if (er32(MANC) & E1000_MANC_SMBUS_EN)
-               return;
-
-       /* power down the PHY */
-       e1e_rphy(hw, PHY_CONTROL, &mii_reg);
-       mii_reg |= MII_CR_POWER_DOWN;
-       e1e_wphy(hw, PHY_CONTROL, mii_reg);
-       mdelay(1);
+       if (adapter->hw.phy.ops.power_down)
+               adapter->hw.phy.ops.power_down(&adapter->hw);
 }
 
 /**
@@ -2759,15 +3118,31 @@ void e1000e_reset(struct e1000_adapter *adapter)
         *   with ERT support assuming ERT set to E1000_ERT_2048), or
         * - the full Rx FIFO size minus one full frame
         */
-       if (adapter->flags & FLAG_HAS_ERT)
-               hwm = min(((pba << 10) * 9 / 10),
-                         ((pba << 10) - (E1000_ERT_2048 << 3)));
-       else
-               hwm = min(((pba << 10) * 9 / 10),
-                         ((pba << 10) - adapter->max_frame_size));
+       if (hw->mac.type == e1000_pchlan) {
+               /*
+                * Workaround PCH LOM adapter hangs with certain network
+                * loads.  If hangs persist, try disabling Tx flow control.
+                */
+               if (adapter->netdev->mtu > ETH_DATA_LEN) {
+                       fc->high_water = 0x3500;
+                       fc->low_water  = 0x1500;
+               } else {
+                       fc->high_water = 0x5000;
+                       fc->low_water  = 0x3000;
+               }
+               fc->refresh_time = 0x1000;
+       } else {
+               if ((adapter->flags & FLAG_HAS_ERT) &&
+                   (adapter->netdev->mtu > ETH_DATA_LEN))
+                       hwm = min(((pba << 10) * 9 / 10),
+                                 ((pba << 10) - (E1000_ERT_2048 << 3)));
+               else
+                       hwm = min(((pba << 10) * 9 / 10),
+                                 ((pba << 10) - adapter->max_frame_size));
 
-       fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
-       fc->low_water = fc->high_water - 8;
+               fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+               fc->low_water = fc->high_water - 8;
+       }
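A worked example of the non-PCH watermark math, as a sketch assuming a 20 KB Rx packet buffer (pba = 20), a jumbo MTU, ERT at E1000_ERT_2048 = 0x100, and E1000_FCRTH_RTH taken to be the 8-byte-granularity mask (0xFFF8) the comment refers to:

	/* Illustration only, not part of the patch:
	 *   (pba << 10)                   = 20480
	 *   90% of the Rx FIFO            = 20480 * 9 / 10       = 18432
	 *   FIFO minus early-receive area = 20480 - (0x100 << 3) = 18432
	 *   hwm            = min(18432, 18432) = 18432
	 *   fc->high_water = 18432 & 0xFFF8    = 18432
	 *   fc->low_water  = 18432 - 8         = 18424
	 */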
 
        if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
                fc->pause_time = 0xFFFF;
@@ -2787,6 +3162,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
                e1000_get_hw_control(adapter);
 
        ew32(WUC, 0);
+       if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
+               e1e_wphy(&adapter->hw, BM_WUC, 0);
 
        if (mac->ops.init_hw(hw))
                e_err("Hardware Error\n");
@@ -2799,7 +3176,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
        e1000e_reset_adaptive(hw);
        e1000_get_phy_info(hw);
 
-       if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
+       if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
+           !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
                u16 phy_data = 0;
                /*
                 * speed up time to link by disabling smart power down, ignore
@@ -2816,6 +3194,12 @@ int e1000e_up(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
 
+       /* DMA latency requirement to work around early-receive/jumbo issue */
+       if (adapter->flags & FLAG_HAS_ERT)
+               adapter->netdev->pm_qos_req =
+                       pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
+                                      PM_QOS_DEFAULT_VALUE);
+
        /* hardware has been reset, we need to reload some things */
        e1000_configure(adapter);
 
@@ -2829,7 +3213,11 @@ int e1000e_up(struct e1000_adapter *adapter)
        netif_wake_queue(adapter->netdev);
 
        /* fire a link change interrupt to start the watchdog */
-       ew32(ICS, E1000_ICS_LSC);
+       if (adapter->msix_entries)
+               ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+       else
+               ew32(ICS, E1000_ICS_LSC);
+
        return 0;
 }
 
@@ -2866,7 +3254,6 @@ void e1000e_down(struct e1000_adapter *adapter)
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
 
-       netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
@@ -2876,6 +3263,12 @@ void e1000e_down(struct e1000_adapter *adapter)
        e1000_clean_tx_ring(adapter);
        e1000_clean_rx_ring(adapter);
 
+       if (adapter->flags & FLAG_HAS_ERT) {
+               pm_qos_remove_request(adapter->netdev->pm_qos_req);
+               adapter->netdev->pm_qos_req = NULL;
+       }
+
        /*
         * TODO: for power management, we could drop the link and
         * pci_disable_device here.
@@ -2933,7 +3326,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
        struct e1000_hw *hw = &adapter->hw;
        u32 icr = er32(ICR);
 
-       e_dbg("%s: icr is %08X\n", netdev->name, icr);
+       e_dbg("icr is %08X\n", icr);
        if (icr & E1000_ICR_RXSEQ) {
                adapter->flags &= ~FLAG_MSI_TEST_FAILED;
                wmb();
@@ -2970,7 +3363,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
        if (err)
                goto msi_test_failed;
 
-       err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
+       err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
                          netdev->name, netdev);
        if (err) {
                pci_disable_msi(adapter->pdev);
@@ -3003,7 +3396,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
                goto msi_test_failed;
 
        /* okay so the test worked, restore settings */
-       e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
+       e_dbg("MSI interrupt test succeeded!\n");
 msi_test_failed:
        e1000e_set_interrupt_capability(adapter);
        e1000_request_irq(adapter);
@@ -3068,12 +3461,15 @@ static int e1000_open(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       struct pci_dev *pdev = adapter->pdev;
        int err;
 
        /* disallow open during test */
        if (test_bit(__E1000_TESTING, &adapter->state))
                return -EBUSY;
 
+       pm_runtime_get_sync(&pdev->dev);
+
        netif_carrier_off(netdev);
 
        /* allocate transmit descriptors */
@@ -3086,6 +3482,15 @@ static int e1000_open(struct net_device *netdev)
        if (err)
                goto err_setup_rx;
 
+       /*
+        * If AMT is enabled, let the firmware know that the network
+        * interface is now open and reset the part to a known state.
+        */
+       if (adapter->flags & FLAG_HAS_AMT) {
+               e1000_get_hw_control(adapter);
+               e1000e_reset(adapter);
+       }
+
        e1000e_power_up_phy(adapter);
 
        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -3094,13 +3499,6 @@ static int e1000_open(struct net_device *netdev)
                e1000_update_mng_vlan(adapter);
 
        /*
-        * If AMT is enabled, let the firmware know that the network
-        * interface is now open
-        */
-       if (adapter->flags & FLAG_HAS_AMT)
-               e1000_get_hw_control(adapter);
-
-       /*
         * before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
@@ -3134,8 +3532,14 @@ static int e1000_open(struct net_device *netdev)
 
        netif_start_queue(netdev);
 
+       adapter->idle_check = true;
+       pm_runtime_put(&pdev->dev);
+
        /* fire a link status change interrupt to start the watchdog */
-       ew32(ICS, E1000_ICS_LSC);
+       if (adapter->msix_entries)
+               ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+       else
+               ew32(ICS, E1000_ICS_LSC);
 
        return 0;
 
@@ -3147,6 +3551,7 @@ err_setup_rx:
        e1000e_free_tx_resources(adapter);
 err_setup_tx:
        e1000e_reset(adapter);
+       pm_runtime_put_sync(&pdev->dev);
 
        return err;
 }
@@ -3165,11 +3570,17 @@ err_setup_tx:
 static int e1000_close(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct pci_dev *pdev = adapter->pdev;
 
        WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
-       e1000e_down(adapter);
+
+       pm_runtime_get_sync(&pdev->dev);
+
+       if (!test_bit(__E1000_DOWN, &adapter->state)) {
+               e1000e_down(adapter);
+               e1000_free_irq(adapter);
+       }
        e1000_power_down_phy(adapter);
-       e1000_free_irq(adapter);
 
        e1000e_free_tx_resources(adapter);
        e1000e_free_rx_resources(adapter);
@@ -3191,6 +3602,8 @@ static int e1000_close(struct net_device *netdev)
        if (adapter->flags & FLAG_HAS_AMT)
                e1000_release_hw_control(adapter);
 
+       pm_runtime_put_sync(&pdev->dev);
+
        return 0;
 }
 /**
@@ -3264,8 +3677,10 @@ static void e1000_update_phy_info(unsigned long data)
  **/
 void e1000e_update_stats(struct e1000_adapter *adapter)
 {
+       struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
+       u16 phy_data;
 
        /*
         * Prevent stats update while adapter is being reset, or if the pci
@@ -3285,11 +3700,34 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
        adapter->stats.roc += er32(ROC);
 
        adapter->stats.mpc += er32(MPC);
-       adapter->stats.scc += er32(SCC);
-       adapter->stats.ecol += er32(ECOL);
-       adapter->stats.mcc += er32(MCC);
-       adapter->stats.latecol += er32(LATECOL);
-       adapter->stats.dc += er32(DC);
+       if ((hw->phy.type == e1000_phy_82578) ||
+           (hw->phy.type == e1000_phy_82577)) {
+               e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
+               if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
+                       adapter->stats.scc += phy_data;
+
+               e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
+               if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
+                       adapter->stats.ecol += phy_data;
+
+               e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
+               if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
+                       adapter->stats.mcc += phy_data;
+
+               e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
+               if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
+                       adapter->stats.latecol += phy_data;
+
+               e1e_rphy(hw, HV_DC_UPPER, &phy_data);
+               if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
+                       adapter->stats.dc += phy_data;
+       } else {
+               adapter->stats.scc += er32(SCC);
+               adapter->stats.ecol += er32(ECOL);
+               adapter->stats.mcc += er32(MCC);
+               adapter->stats.latecol += er32(LATECOL);
+               adapter->stats.dc += er32(DC);
+       }
        adapter->stats.xonrxc += er32(XONRXC);
        adapter->stats.xontxc += er32(XONTXC);
        adapter->stats.xoffrxc += er32(XOFFRXC);
@@ -3307,20 +3745,35 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 
        hw->mac.tx_packet_delta = er32(TPT);
        adapter->stats.tpt += hw->mac.tx_packet_delta;
-       hw->mac.collision_delta = er32(COLC);
+       if ((hw->phy.type == e1000_phy_82578) ||
+           (hw->phy.type == e1000_phy_82577)) {
+               e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
+               if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
+                       hw->mac.collision_delta = phy_data;
+       } else {
+               hw->mac.collision_delta = er32(COLC);
+       }
        adapter->stats.colc += hw->mac.collision_delta;
 
        adapter->stats.algnerrc += er32(ALGNERRC);
        adapter->stats.rxerrc += er32(RXERRC);
-       if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
-               adapter->stats.tncrs += er32(TNCRS);
+       if ((hw->phy.type == e1000_phy_82578) ||
+           (hw->phy.type == e1000_phy_82577)) {
+               e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
+               if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
+                       adapter->stats.tncrs += phy_data;
+       } else {
+               if ((hw->mac.type != e1000_82574) &&
+                   (hw->mac.type != e1000_82583))
+                       adapter->stats.tncrs += er32(TNCRS);
+       }
        adapter->stats.cexterr += er32(CEXTERR);
        adapter->stats.tsctc += er32(TSCTC);
        adapter->stats.tsctfc += er32(TSCTFC);
 
        /* Fill out the OS statistics structure */
-       adapter->net_stats.multicast = adapter->stats.mprc;
-       adapter->net_stats.collisions = adapter->stats.colc;
+       netdev->stats.multicast = adapter->stats.mprc;
+       netdev->stats.collisions = adapter->stats.colc;
 
        /* Rx Errors */
 
@@ -3328,22 +3781,22 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
         * RLEC on some newer hardware can be incorrect so build
         * our own version based on RUC and ROC
         */
-       adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+       netdev->stats.rx_errors = adapter->stats.rxerrc +
                adapter->stats.crcerrs + adapter->stats.algnerrc +
                adapter->stats.ruc + adapter->stats.roc +
                adapter->stats.cexterr;
-       adapter->net_stats.rx_length_errors = adapter->stats.ruc +
+       netdev->stats.rx_length_errors = adapter->stats.ruc +
                                              adapter->stats.roc;
-       adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-       adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-       adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+       netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+       netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
+       netdev->stats.rx_missed_errors = adapter->stats.mpc;
 
        /* Tx Errors */
-       adapter->net_stats.tx_errors = adapter->stats.ecol +
+       netdev->stats.tx_errors = adapter->stats.ecol +
                                       adapter->stats.latecol;
-       adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
-       adapter->net_stats.tx_window_errors = adapter->stats.latecol;
-       adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+       netdev->stats.tx_aborted_errors = adapter->stats.ecol;
+       netdev->stats.tx_window_errors = adapter->stats.latecol;
+       netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
 
        /* Tx Dropped needs to be maintained elsewhere */
 
@@ -3412,7 +3865,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
               ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
 }
 
-bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000e_has_link(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        bool link_active = 0;
@@ -3493,8 +3946,11 @@ static void e1000_watchdog_task(struct work_struct *work)
        u32 link, tctl;
        int tx_pending = 0;
 
-       link = e1000_has_link(adapter);
+       link = e1000e_has_link(adapter);
        if ((netif_carrier_ok(netdev)) && link) {
+               /* Cancel scheduled suspend requests. */
+               pm_runtime_resume(netdev->dev.parent);
+
                e1000e_enable_receives(adapter);
                goto link_up;
        }
@@ -3506,6 +3962,10 @@ static void e1000_watchdog_task(struct work_struct *work)
        if (link) {
                if (!netif_carrier_ok(netdev)) {
                        bool txb2b = 1;
+
+                       /* Cancel scheduled suspend requests. */
+                       pm_runtime_resume(netdev->dev.parent);
+
                        /* update snapshot of PHY registers on LSC */
                        e1000_phy_read_status(adapter);
                        mac->ops.get_link_up_info(&adapter->hw,
@@ -3533,22 +3993,16 @@ static void e1000_watchdog_task(struct work_struct *work)
                                               "link gets many collisions.\n");
                        }
 
-                       /*
-                        * tweak tx_queue_len according to speed/duplex
-                        * and adjust the timeout factor
-                        */
-                       netdev->tx_queue_len = adapter->tx_queue_len;
+                       /* adjust timeout factor according to speed/duplex */
                        adapter->tx_timeout_factor = 1;
                        switch (adapter->link_speed) {
                        case SPEED_10:
                                txb2b = 0;
-                               netdev->tx_queue_len = 10;
                                adapter->tx_timeout_factor = 16;
                                break;
                        case SPEED_100:
                                txb2b = 0;
-                               netdev->tx_queue_len = 100;
-                               /* maybe add some timeout factor ? */
+                               adapter->tx_timeout_factor = 10;
                                break;
                        }
 
@@ -3621,6 +4075,9 @@ static void e1000_watchdog_task(struct work_struct *work)
 
                        if (adapter->flags & FLAG_RX_NEEDS_RESTART)
                                schedule_work(&adapter->reset_task);
+                       else
+                               pm_schedule_suspend(netdev->dev.parent,
+                                                       LINK_TIMEOUT);
                }
        }
 
@@ -3656,6 +4113,22 @@ link_up:
                }
        }
 
+       /* Simple mode for Interrupt Throttle Rate (ITR) */
+       if (adapter->itr_setting == 4) {
+               /*
+                * Symmetric Tx/Rx gets a reduced ITR=2000;
+                * Total asymmetrical Tx or Rx gets ITR=8000;
+                * everyone else is between 2000-8000.
+                */
+               u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+               u32 dif = (adapter->gotc > adapter->gorc ?
+                           adapter->gotc - adapter->gorc :
+                           adapter->gorc - adapter->gotc) / 10000;
+               u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+               ew32(ITR, 1000000000 / (itr * 256));
+       }
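A worked example of the simple-mode ITR math above; the ITR register counts in 256 ns units, which is what the 1000000000 / (itr * 256) conversion reflects, and the traffic counter values below are hypothetical:

	/* Illustration only, not part of the patch: gotc = 60000, gorc = 20000
	 *   goc = (60000 + 20000) / 10000   = 8
	 *   dif = (60000 - 20000) / 10000   = 4
	 *   itr = 4 * 6000 / 8 + 2000       = 5000 interrupts/sec
	 *   ITR = 1000000000 / (5000 * 256) = 781  (256 ns units, ~200 us)
	 */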
+
        /* Cause software interrupt to ensure Rx ring is cleaned */
        if (adapter->msix_entries)
                ew32(ICS, adapter->rx_ring->ims_val);
@@ -3697,68 +4170,64 @@ static int e1000_tso(struct e1000_adapter *adapter,
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
        int err;
 
-       if (skb_is_gso(skb)) {
-               if (skb_header_cloned(skb)) {
-                       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-                       if (err)
-                               return err;
-               }
+       if (!skb_is_gso(skb))
+               return 0;
 
-               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-               mss = skb_shinfo(skb)->gso_size;
-               if (skb->protocol == htons(ETH_P_IP)) {
-                       struct iphdr *iph = ip_hdr(skb);
-                       iph->tot_len = 0;
-                       iph->check = 0;
-                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                                iph->daddr, 0,
-                                                                IPPROTO_TCP,
-                                                                0);
-                       cmd_length = E1000_TXD_CMD_IP;
-                       ipcse = skb_transport_offset(skb) - 1;
-               } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
-                       ipv6_hdr(skb)->payload_len = 0;
-                       tcp_hdr(skb)->check =
-                               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                &ipv6_hdr(skb)->daddr,
-                                                0, IPPROTO_TCP, 0);
-                       ipcse = 0;
-               }
-               ipcss = skb_network_offset(skb);
-               ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
-               tucss = skb_transport_offset(skb);
-               tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
-               tucse = 0;
+       if (skb_header_cloned(skb)) {
+               err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+               if (err)
+                       return err;
+       }
 
-               cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
-                              E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+       hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       mss = skb_shinfo(skb)->gso_size;
+       if (skb->protocol == htons(ETH_P_IP)) {
+               struct iphdr *iph = ip_hdr(skb);
+               iph->tot_len = 0;
+               iph->check = 0;
+               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                        0, IPPROTO_TCP, 0);
+               cmd_length = E1000_TXD_CMD_IP;
+               ipcse = skb_transport_offset(skb) - 1;
+       } else if (skb_is_gso_v6(skb)) {
+               ipv6_hdr(skb)->payload_len = 0;
+               tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                      &ipv6_hdr(skb)->daddr,
+                                                      0, IPPROTO_TCP, 0);
+               ipcse = 0;
+       }
+       ipcss = skb_network_offset(skb);
+       ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+       tucss = skb_transport_offset(skb);
+       tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+       tucse = 0;
 
-               i = tx_ring->next_to_use;
-               context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
-               buffer_info = &tx_ring->buffer_info[i];
+       cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+                      E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
 
-               context_desc->lower_setup.ip_fields.ipcss  = ipcss;
-               context_desc->lower_setup.ip_fields.ipcso  = ipcso;
-               context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
-               context_desc->upper_setup.tcp_fields.tucss = tucss;
-               context_desc->upper_setup.tcp_fields.tucso = tucso;
-               context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
-               context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
-               context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
-               context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+       i = tx_ring->next_to_use;
+       context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+       buffer_info = &tx_ring->buffer_info[i];
 
-               buffer_info->time_stamp = jiffies;
-               buffer_info->next_to_watch = i;
+       context_desc->lower_setup.ip_fields.ipcss  = ipcss;
+       context_desc->lower_setup.ip_fields.ipcso  = ipcso;
+       context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
+       context_desc->upper_setup.tcp_fields.tucss = tucss;
+       context_desc->upper_setup.tcp_fields.tucso = tucso;
+       context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+       context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
+       context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+       context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
-               i++;
-               if (i == tx_ring->count)
-                       i = 0;
-               tx_ring->next_to_use = i;
+       buffer_info->time_stamp = jiffies;
+       buffer_info->next_to_watch = i;
 
-               return 1;
-       }
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+       tx_ring->next_to_use = i;
 
-       return 0;
+       return 1;
 }
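The rewrite above only flattens the nesting with an early return; the return-value contract (0 = no TSO, 1 = context descriptor queued, negative = error) is unchanged. A sketch of how a caller such as e1000_xmit_frame() typically consumes it, shown for orientation only since that hunk is not part of this excerpt:

	/* Illustration only, not part of the patch */
	tso = e1000_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	if (tso)
		tx_flags |= E1000_TX_FLAGS_TSO;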
 
 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
@@ -3830,23 +4299,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                        unsigned int mss)
 {
        struct e1000_ring *tx_ring = adapter->tx_ring;
+       struct pci_dev *pdev = adapter->pdev;
        struct e1000_buffer *buffer_info;
        unsigned int len = skb_headlen(skb);
-       unsigned int offset, size, count = 0, i;
-       unsigned int f;
-       dma_addr_t *map;
+       unsigned int offset = 0, size, count = 0, i;
+       unsigned int f, bytecount, segs;
 
        i = tx_ring->next_to_use;
 
-       if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-               dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-               adapter->tx_dma_failed++;
-               return 0;
-       }
-
-       map = skb_shinfo(skb)->dma_maps;
-       offset = 0;
-
        while (len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, max_per_txd);
@@ -3854,11 +4314,16 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                buffer_info->length = size;
                buffer_info->time_stamp = jiffies;
                buffer_info->next_to_watch = i;
-               buffer_info->dma = map[0] + offset;
-               count++;
+               buffer_info->dma = dma_map_single(&pdev->dev,
+                                                 skb->data + offset,
+                                                 size, DMA_TO_DEVICE);
+               buffer_info->mapped_as_page = false;
+               if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+                       goto dma_error;
 
                len -= size;
                offset += size;
+               count++;
 
                if (len) {
                        i++;
@@ -3872,7 +4337,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 
                frag = &skb_shinfo(skb)->frags[f];
                len = frag->size;
-               offset = 0;
+               offset = frag->page_offset;
 
                while (len) {
                        i++;
@@ -3885,7 +4350,12 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                        buffer_info->length = size;
                        buffer_info->time_stamp = jiffies;
                        buffer_info->next_to_watch = i;
-                       buffer_info->dma = map[f + 1] + offset;
+                       buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
+                                                       offset, size,
+                                                       DMA_TO_DEVICE);
+                       buffer_info->mapped_as_page = true;
+                       if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+                               goto dma_error;
 
                        len -= size;
                        offset += size;
@@ -3893,10 +4363,32 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                }
        }
 
+       segs = skb_shinfo(skb)->gso_segs ?: 1;
+       /* multiply data chunks by size of headers */
+       bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
        tx_ring->buffer_info[i].skb = skb;
+       tx_ring->buffer_info[i].segs = segs;
+       tx_ring->buffer_info[i].bytecount = bytecount;
        tx_ring->buffer_info[first].next_to_watch = i;
 
        return count;
+
+dma_error:
+       dev_err(&pdev->dev, "TX DMA map failed\n");
+       buffer_info->dma = 0;
+       if (count)
+               count--;
+
+       while (count--) {
+               if (i == 0)
+                       i += tx_ring->count;
+               i--;
+               buffer_info = &tx_ring->buffer_info[i];
+               e1000_put_txbuf(adapter, buffer_info);
+       }
+
+       return 0;
 }
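A worked example of the segs/bytecount accounting introduced above, which later feeds the Tx byte/packet statistics; the header and length values are hypothetical:

	/* Illustration only, not part of the patch: a TSO skb with
	 * gso_segs = 3, skb_headlen(skb) = 66 and skb->len = 4434:
	 *   bytecount = (3 - 1) * 66 + 4434 = 4566
	 * i.e. the headers replicated into the 2nd and 3rd segments are
	 * counted, so the stats match what actually goes on the wire.
	 */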
 
 static void e1000_tx_queue(struct e1000_adapter *adapter,
@@ -3969,8 +4461,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
        u16 length, offset;
 
        if (vlan_tx_tag_present(skb)) {
-               if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
-                   && (adapter->hw.mng_cookie.status &
+               if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+                   (adapter->hw.mng_cookie.status &
                        E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
                        return 0;
        }
@@ -4035,7 +4527,8 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
 }
 
 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
-static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+                                   struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_ring *tx_ring = adapter->tx_ring;
@@ -4043,7 +4536,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        unsigned int max_per_txd = E1000_MAX_PER_TXD;
        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int tx_flags = 0;
-       unsigned int len = skb->len - skb->data_len;
+       unsigned int len = skb_headlen(skb);
        unsigned int nr_frags;
        unsigned int mss;
        int count = 0;
@@ -4093,7 +4586,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
-                       len = skb->len - skb->data_len;
+                       len = skb_headlen(skb);
                }
        }
 
@@ -4179,6 +4672,8 @@ static void e1000_reset_task(struct work_struct *work)
        struct e1000_adapter *adapter;
        adapter = container_of(work, struct e1000_adapter, reset_task);
 
+       e1000e_dump(adapter);
+       e_err("Reset adapter\n");
        e1000e_reinit_locked(adapter);
 }
 
@@ -4191,10 +4686,8 @@ static void e1000_reset_task(struct work_struct *work)
  **/
 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
 {
-       struct e1000_adapter *adapter = netdev_priv(netdev);
-
        /* only return the current stats */
-       return &adapter->net_stats;
+       return &netdev->stats;
 }
 
 /**
@@ -4209,34 +4702,34 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-       if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
-           (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-               e_err("Invalid MTU setting\n");
+       /* Jumbo frame support */
+       if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+           !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+               e_err("Jumbo Frames not supported.\n");
                return -EINVAL;
        }
 
-       /* Jumbo frame size limits */
-       if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-               if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-                       e_err("Jumbo Frames not supported.\n");
-                       return -EINVAL;
-               }
-               if (adapter->hw.phy.type == e1000_phy_ife) {
-                       e_err("Jumbo Frames not supported.\n");
-                       return -EINVAL;
-               }
+       /* Supported frame sizes */
+       if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
+           (max_frame > adapter->max_hw_frame_size)) {
+               e_err("Unsupported MTU setting\n");
+               return -EINVAL;
        }
 
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
-       if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-               e_err("MTU > 9216 not supported.\n");
-               return -EINVAL;
+       /* 82573 Errata 17 */
+       if (((adapter->hw.mac.type == e1000_82573) ||
+            (adapter->hw.mac.type == e1000_82574)) &&
+           (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
+               adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
+               e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
        }
 
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                msleep(1);
-       /* e1000e_down has a dependency on max_frame_size */
+       /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
        adapter->max_frame_size = max_frame;
+       e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+       netdev->mtu = new_mtu;
        if (netif_running(netdev))
                e1000e_down(adapter);
 
@@ -4249,13 +4742,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
         * fragmented skbs
         */
 
-       if (max_frame <= 256)
-               adapter->rx_buffer_len = 256;
-       else if (max_frame <= 512)
-               adapter->rx_buffer_len = 512;
-       else if (max_frame <= 1024)
-               adapter->rx_buffer_len = 1024;
-       else if (max_frame <= 2048)
+       if (max_frame <= 2048)
                adapter->rx_buffer_len = 2048;
        else
                adapter->rx_buffer_len = 4096;
@@ -4266,9 +4753,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
                                         + ETH_FCS_LEN;
 
-       e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
-       netdev->mtu = new_mtu;
-
        if (netif_running(netdev))
                e1000e_up(adapter);
        else
@@ -4293,8 +4777,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                data->phy_id = adapter->hw.phy.addr;
                break;
        case SIOCGMIIREG:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
+               e1000_phy_read_status(adapter);
+
                switch (data->reg_num & 0x1F) {
                case MII_BMCR:
                        data->val_out = adapter->phy_regs.bmcr;
@@ -4349,13 +4833,90 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        }
 }
 
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 i, mac_reg;
+       u16 phy_reg;
+       int retval = 0;
+
+       /* copy MAC RARs to PHY RARs */
+       for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
+               mac_reg = er32(RAL(i));
+               e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
+               e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
+               mac_reg = er32(RAH(i));
+               e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
+               e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
+       }
+
+       /* copy MAC MTA to PHY MTA */
+       for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
+               mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
+               e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
+               e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
+       }
+
+       /* configure PHY Rx Control register */
+       e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
+       mac_reg = er32(RCTL);
+       if (mac_reg & E1000_RCTL_UPE)
+               phy_reg |= BM_RCTL_UPE;
+       if (mac_reg & E1000_RCTL_MPE)
+               phy_reg |= BM_RCTL_MPE;
+       phy_reg &= ~(BM_RCTL_MO_MASK);
+       if (mac_reg & E1000_RCTL_MO_3)
+               phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+                               << BM_RCTL_MO_SHIFT);
+       if (mac_reg & E1000_RCTL_BAM)
+               phy_reg |= BM_RCTL_BAM;
+       if (mac_reg & E1000_RCTL_PMCF)
+               phy_reg |= BM_RCTL_PMCF;
+       mac_reg = er32(CTRL);
+       if (mac_reg & E1000_CTRL_RFCE)
+               phy_reg |= BM_RCTL_RFCE;
+       e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);
+
+       /* enable PHY wakeup in MAC register */
+       ew32(WUFC, wufc);
+       ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+
+       /* configure and enable PHY wakeup in PHY registers */
+       e1e_wphy(&adapter->hw, BM_WUFC, wufc);
+       e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+
+       /* activate PHY wakeup */
+       retval = hw->phy.ops.acquire(hw);
+       if (retval) {
+               e_err("Could not acquire PHY\n");
+               return retval;
+       }
+       e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
+       retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
+       if (retval) {
+               e_err("Could not read PHY page 769\n");
+               goto out;
+       }
+       phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
+       retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+       if (retval)
+               e_err("Could not set PHY Host Wakeup bit\n");
+out:
+       hw->phy.ops.release(hw);
+
+       return retval;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+                           bool runtime)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, ctrl_ext, rctl, status;
-       u32 wufc = adapter->wol;
+       /* Runtime suspend should only enable wakeup for link changes */
+       u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
        int retval = 0;
 
        netif_device_detach(netdev);
@@ -4391,8 +4952,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
                #define E1000_CTRL_ADVD3WUC 0x00100000
                /* phy power management enable */
                #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
-               ctrl |= E1000_CTRL_ADVD3WUC |
-                       E1000_CTRL_EN_PHY_PWR_MGMT;
+               ctrl |= E1000_CTRL_ADVD3WUC;
+               if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
+                       ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
                ew32(CTRL, ctrl);
 
                if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
@@ -4400,7 +4962,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
                    e1000_media_type_internal_serdes) {
                        /* keep the laser running in D3 */
                        ctrl_ext = er32(CTRL_EXT);
-                       ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+                       ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
                        ew32(CTRL_EXT, ctrl_ext);
                }
 
@@ -4410,8 +4972,16 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
                /* Allow time for pending master requests to run */
                e1000e_disable_pcie_master(&adapter->hw);
 
-               ew32(WUC, E1000_WUC_PME_EN);
-               ew32(WUFC, wufc);
+               if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+                       /* enable wakeup by the PHY */
+                       retval = e1000_init_phy_wakeup(adapter, wufc);
+                       if (retval)
+                               return retval;
+               } else {
+                       /* enable wakeup by the MAC */
+                       ew32(WUFC, wufc);
+                       ew32(WUC, E1000_WUC_PME_EN);
+               }
        } else {
                ew32(WUC, 0);
                ew32(WUFC, 0);
@@ -4420,7 +4990,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
        *enable_wake = !!wufc;
 
        /* make sure adapter isn't asleep if manageability is enabled */
-       if (adapter->flags & FLAG_MNG_PT_ENABLED)
+       if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+           (hw->mac.ops.check_mng_mode(hw)))
                *enable_wake = true;
 
        if (adapter->hw.phy.type == e1000_phy_igp_3)
@@ -4477,45 +5048,51 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
        }
 }
 
-static void e1000e_disable_l1aspm(struct pci_dev *pdev)
+#ifdef CONFIG_PCIEASPM
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+       pci_disable_link_state(pdev, state);
+}
+#else
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 {
        int pos;
-       u16 val;
+       u16 reg16;
 
        /*
-        * 82573 workaround - disable L1 ASPM on mobile chipsets
-        *
-        * L1 ASPM on various mobile (ich7) chipsets do not behave properly
-        * resulting in lost data or garbage information on the pci-e link
-        * level. This could result in (false) bad EEPROM checksum errors,
-        * long ping times (up to 2s) or even a system freeze/hang.
-        *
-        * Unfortunately this feature saves about 1W power consumption when
-        * active.
+        * Both device and parent should have the same ASPM setting.
+        * Disable ASPM in downstream component first and then upstream.
         */
-       pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-       pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
-       if (val & 0x2) {
-               dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
-               val &= ~0x2;
-               pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
-       }
-}
+       pos = pci_pcie_cap(pdev);
+       pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+       reg16 &= ~state;
+       pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+
+       if (!pdev->bus->self)
+               return;
 
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+       pos = pci_pcie_cap(pdev->bus->self);
+       pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
+       reg16 &= ~state;
+       pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+}
+#endif
+void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 {
-       int retval;
-       bool wake;
+       dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+                (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
+                (state & PCIE_LINK_STATE_L1) ? "L1" : "");
 
-       retval = __e1000_shutdown(pdev, &wake);
-       if (!retval)
-               e1000_complete_shutdown(pdev, true, wake);
+       __e1000e_disable_aspm(pdev, state);
+}
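A minimal usage sketch for the helper above; state is a PCIE_LINK_STATE_* bitmask, and the non-CONFIG_PCIEASPM fallback clears the bits on the device before its upstream bridge so both ends of the link agree:

	/* Illustration only, not part of the patch */
	e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);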
 
-       return retval;
+#ifdef CONFIG_PM_OPS
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
+{
+       return !!adapter->tx_ring->buffer_info;
 }
 
-static int e1000_resume(struct pci_dev *pdev)
+static int __e1000_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4524,27 +5101,9 @@ static int e1000_resume(struct pci_dev *pdev)
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
-       e1000e_disable_l1aspm(pdev);
-
-       err = pci_enable_device_mem(pdev);
-       if (err) {
-               dev_err(&pdev->dev,
-                       "Cannot enable PCI device from suspend\n");
-               return err;
-       }
-
-       /* AER (Advanced Error Reporting) hooks */
-       err = pci_enable_pcie_error_reporting(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-                                   "0x%x\n", err);
-               /* non-fatal, continue */
-       }
-
-       pci_set_master(pdev);
-
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_enable_wake(pdev, PCI_D3cold, 0);
+       pci_save_state(pdev);
+       if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+               e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
 
        e1000e_set_interrupt_capability(adapter);
        if (netif_running(netdev)) {
@@ -4554,10 +5113,39 @@ static int e1000_resume(struct pci_dev *pdev)
        }
 
        e1000e_power_up_phy(adapter);
+
+       /* report the system wakeup cause from S3/S4 */
+       if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+               u16 phy_data;
+
+               e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
+               if (phy_data) {
+                       e_info("PHY Wakeup cause - %s\n",
+                               phy_data & E1000_WUS_EX ? "Unicast Packet" :
+                               phy_data & E1000_WUS_MC ? "Multicast Packet" :
+                               phy_data & E1000_WUS_BC ? "Broadcast Packet" :
+                               phy_data & E1000_WUS_MAG ? "Magic Packet" :
+                               phy_data & E1000_WUS_LNKC ? "Link Status "
+                               " Change" : "other");
+               }
+               e1e_wphy(&adapter->hw, BM_WUS, ~0);
+       } else {
+               u32 wus = er32(WUS);
+               if (wus) {
+                       e_info("MAC Wakeup cause - %s\n",
+                               wus & E1000_WUS_EX ? "Unicast Packet" :
+                               wus & E1000_WUS_MC ? "Multicast Packet" :
+                               wus & E1000_WUS_BC ? "Broadcast Packet" :
+                               wus & E1000_WUS_MAG ? "Magic Packet" :
+                               wus & E1000_WUS_LNKC ? "Link Status Change" :
+                               "other");
+               }
+               ew32(WUS, ~0);
+       }
+
        e1000e_reset(adapter);
-       ew32(WUS, ~0);
 
-       e1000_init_manageability(adapter);
+       e1000_init_manageability_pt(adapter);
 
        if (netif_running(netdev))
                e1000e_up(adapter);
@@ -4574,13 +5162,88 @@ static int e1000_resume(struct pci_dev *pdev)
 
        return 0;
 }
-#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int retval;
+       bool wake;
+
+       retval = __e1000_shutdown(pdev, &wake, false);
+       if (!retval)
+               e1000_complete_shutdown(pdev, true, wake);
+
+       return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       if (e1000e_pm_ready(adapter))
+               adapter->idle_check = true;
+
+       return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       if (e1000e_pm_ready(adapter)) {
+               bool wake;
+
+               __e1000_shutdown(pdev, &wake, true);
+       }
+
+       return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       if (!e1000e_pm_ready(adapter))
+               return 0;
+
+       if (adapter->idle_check) {
+               adapter->idle_check = false;
+               if (!e1000e_has_link(adapter))
+                       pm_schedule_suspend(dev, MSEC_PER_SEC);
+       }
+
+       return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       if (!e1000e_pm_ready(adapter))
+               return 0;
+
+       adapter->idle_check = !dev->power.runtime_auto;
+       return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM_OPS */
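These suspend/resume/runtime callbacks are normally tied to the PCI driver through a struct dev_pm_ops; a sketch of that wiring, assuming the conventional SET_SYSTEM_SLEEP_PM_OPS/SET_RUNTIME_PM_OPS macros (the actual table is presumably added further down in the full patch and may differ in detail):

/* Illustration only, not part of the patch */
#ifdef CONFIG_PM_OPS
static const struct dev_pm_ops e1000_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
	SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
			   e1000_idle)
};
#endif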
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
        bool wake = false;
 
-       __e1000_shutdown(pdev, &wake);
+       __e1000_shutdown(pdev, &wake, false);
 
        if (system_state == SYSTEM_POWER_OFF)
                e1000_complete_shutdown(pdev, false, wake);
@@ -4619,6 +5282,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 
        netif_device_detach(netdev);
 
+       if (state == pci_channel_io_perm_failure)
+               return PCI_ERS_RESULT_DISCONNECT;
+
        if (netif_running(netdev))
                e1000e_down(adapter);
        pci_disable_device(pdev);
@@ -4642,7 +5308,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
        int err;
        pci_ers_result_t result;
 
-       e1000e_disable_l1aspm(pdev);
+       if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+               e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
@@ -4650,6 +5317,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
+               pdev->state_saved = true;
                pci_restore_state(pdev);
 
                pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4678,7 +5346,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       e1000_init_manageability(adapter);
+       e1000_init_manageability_pt(adapter);
 
        if (netif_running(netdev)) {
                if (e1000e_up(adapter)) {
@@ -4735,13 +5403,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
                dev_warn(&adapter->pdev->dev,
                         "Warning: detected DSPD enabled in EEPROM\n");
        }
-
-       ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
-       if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
-               /* ASPM enable */
-               dev_warn(&adapter->pdev->dev,
-                        "Warning: detected ASPM enabled in EEPROM\n");
-       }
 }
 
 static const struct net_device_ops e1000e_netdev_ops = {
@@ -4790,23 +5451,24 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        u16 eeprom_data = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
 
-       e1000e_disable_l1aspm(pdev);
+       if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
+               e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
 
        err = pci_enable_device_mem(pdev);
        if (err)
                return err;
 
        pci_using_dac = 0;
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                if (!err)
                        pci_using_dac = 1;
        } else {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = pci_set_consistent_dma_mask(pdev,
-                                                         DMA_BIT_MASK(32));
+                       err = dma_set_coherent_mask(&pdev->dev,
+                                                   DMA_BIT_MASK(32));
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
@@ -4821,6 +5483,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        if (err)
                goto err_pci_reg;
 
+       /* AER (Advanced Error Reporting) hooks */
+       pci_enable_pcie_error_reporting(pdev);
+
        pci_set_master(pdev);
        /* PCI config space info */
        err = pci_save_state(pdev);
@@ -4834,6 +5499,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
+       netdev->irq = pdev->irq;
+
        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        hw = &adapter->hw;
@@ -4845,6 +5512,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        adapter->flags2 = ei->flags2;
        adapter->hw.adapter = adapter;
        adapter->hw.mac.type = ei->mac;
+       adapter->max_hw_frame_size = ei->max_hw_frame_size;
        adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
 
        mmio_start = pci_resource_start(pdev, 0);
@@ -4955,7 +5623,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        e1000_eeprom_checks(adapter);
 
-       /* copy the MAC address out of the NVM */
+       /* copy the MAC address */
        if (e1000e_read_mac_addr(&adapter->hw))
                e_err("NVM Read Error while reading MAC address\n");
 
@@ -4980,6 +5648,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
        INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
        INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
+       INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
 
        /* Initialize link parameters. User can change them with ethtool */
        adapter->hw.mac.autoneg = 1;
@@ -5000,6 +5669,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                /* APME bit in EEPROM is mapped to WUC.APME */
                eeprom_data = er32(WUC);
                eeprom_apme_mask = E1000_WUC_APME;
+               if (eeprom_data & E1000_WUC_PHY_WAKE)
+                       adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
        } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
                if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
                    (adapter->hw.bus.func == 1))
@@ -5050,6 +5721,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        e1000_print_device_info(adapter);
 
+       if (pci_dev_run_wake(pdev)) {
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
+       pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+
        return 0;
 
 err_register:
@@ -5092,29 +5769,46 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       int err;
+       bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+       pm_runtime_get_sync(&pdev->dev);
 
        /*
         * flush_scheduled_work() may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled
         */
-       set_bit(__E1000_DOWN, &adapter->state);
+       if (!down)
+               set_bit(__E1000_DOWN, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
 
+       cancel_work_sync(&adapter->reset_task);
+       cancel_work_sync(&adapter->watchdog_task);
+       cancel_work_sync(&adapter->downshift_task);
+       cancel_work_sync(&adapter->update_phy_task);
+       cancel_work_sync(&adapter->print_hang_task);
        flush_scheduled_work();
 
+       if (!(netdev->flags & IFF_UP))
+               e1000_power_down_phy(adapter);
+
+       /* Don't lie to e1000_close() down the road. */
+       if (!down)
+               clear_bit(__E1000_DOWN, &adapter->state);
+       unregister_netdev(netdev);
+
+       if (pci_dev_run_wake(pdev)) {
+               pm_runtime_disable(&pdev->dev);
+               pm_runtime_set_suspended(&pdev->dev);
+       }
+       pm_runtime_put_noidle(&pdev->dev);
+
        /*
         * Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
         */
        e1000_release_hw_control(adapter);
 
-       unregister_netdev(netdev);
-
-       if (!e1000_check_reset_block(&adapter->hw))
-               e1000_phy_hw_reset(&adapter->hw);
-
        e1000e_reset_interrupt_capability(adapter);
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
@@ -5128,10 +5822,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
        free_netdev(netdev);
 
        /* AER disable */
-       err = pci_disable_pcie_error_reporting(pdev);
-       if (err)
-               dev_err(&pdev->dev,
-                       "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+       pci_disable_pcie_error_reporting(pdev);
 
        pci_disable_device(pdev);
 }
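
In the remove path above, the driver now takes a runtime-PM reference before tearing the device down and drops it only after runtime PM has been disabled. A rough sketch of that bracketing, with a hypothetical foo_remove() standing in for e1000_remove(), could look like this; the elided teardown is a placeholder, while the pm_runtime_*() and pci_dev_run_wake() calls are the real APIs used in the hunk.

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static void foo_remove(struct pci_dev *pdev)
{
	/* Keep the device resumed for the duration of the teardown */
	pm_runtime_get_sync(&pdev->dev);

	/* ... cancel work items, unregister the netdev, free rings ... */

	if (pci_dev_run_wake(pdev)) {
		pm_runtime_disable(&pdev->dev);
		pm_runtime_set_suspended(&pdev->dev);
	}
	/* Drop our reference without invoking the idle callback */
	pm_runtime_put_noidle(&pdev->dev);
}
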
@@ -5143,7 +5834,7 @@ static struct pci_error_handlers e1000_err_handler = {
        .resume = e1000_io_resume,
 };
 
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -5183,6 +5874,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
 
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
@@ -5200,21 +5892,33 @@ static struct pci_device_id e1000_pci_tbl[] = {
 
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
+
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
 
        { }     /* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 
+#ifdef CONFIG_PM_OPS
+static const struct dev_pm_ops e1000_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+       SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+                               e1000_runtime_resume, e1000_idle)
+};
+#endif
+
 /* PCI Device API Driver */
 static struct pci_driver e1000_driver = {
        .name     = e1000e_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
-       /* Power Management Hooks */
-       .suspend  = e1000_suspend,
-       .resume   = e1000_resume,
+#ifdef CONFIG_PM_OPS
+       .driver.pm = &e1000_pm_ops,
 #endif
        .shutdown = e1000_shutdown,
        .err_handler = &e1000_err_handler
@@ -5229,14 +5933,11 @@ static struct pci_driver e1000_driver = {
 static int __init e1000_init_module(void)
 {
        int ret;
-       printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
-              e1000e_driver_name, e1000e_driver_version);
-       printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
-              e1000e_driver_name);
+       pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+               e1000e_driver_version);
+       pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
        ret = pci_register_driver(&e1000_driver);
-       pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
-                              PM_QOS_DEFAULT_VALUE);
-                               
+
        return ret;
 }
 module_init(e1000_init_module);
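
The init hunk above replaces open-coded printk(KERN_INFO ...) calls, which repeated the driver name in every message, with pr_info(); the per-module prefix is supplied by the pr_fmt() macro. A minimal, hedged illustration of the pattern (the foo module and its message text are placeholders):

/* pr_fmt() must be defined before the first include that pulls in printk.h */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>

static int __init foo_init(void)
{
	/* Emits e.g. "foo: driver version 1.0" without repeating the name */
	pr_info("driver version %s\n", "1.0");
	return 0;
}

static void __exit foo_exit(void)
{
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
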
@@ -5250,7 +5951,6 @@ module_init(e1000_init_module);
 static void __exit e1000_exit_module(void)
 {
        pci_unregister_driver(&e1000_driver);
-       pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
 }
 module_exit(e1000_exit_module);