Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
[safe/jmp/linux-2.6] / drivers / net / e1000e / netdev.c
index d57880e..24507f3 100644 (file)
@@ -26,6 +26,8 @@
 
 *******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -36,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/tcp.h>
 #include <linux/ipv6.h>
+#include <linux/slab.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 #include <linux/mii.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
 #include <linux/aer.h>
 
 #include "e1000.h"
 
-#define DRV_VERSION "1.0.2-k2"
+#define DRV_VERSION "1.0.2-k4"
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -65,6 +69,361 @@ static const struct e1000_info *e1000_info_tbl[] = {
        [board_pchlan]          = &e1000_pch_info,
 };
 
/*
 * e1000_reg_info - register offset/name pair consumed by e1000_regdump()
 * when pretty-printing the hardware state in e1000e_dump().
 */
struct e1000_reg_info {
	u32 ofs;	/* register offset relative to the mapped BAR */
	char *name;	/* human-readable register name for the dump */
};

/*
 * Rx/Tx data FIFO diagnostic registers.  These are not part of the
 * driver's regular register definitions; they exist only for the
 * register-dump code below.
 */
#define E1000_RDFH     0x02410 /* Rx Data FIFO Head - RW */
#define E1000_RDFT     0x02418 /* Rx Data FIFO Tail - RW */
#define E1000_RDFHS    0x02420 /* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS    0x02428 /* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC    0x02430 /* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH     0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT     0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS    0x03420 /* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS    0x03428 /* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC    0x03430 /* Tx Data FIFO Packet Count - RW */

/*
 * Table of registers printed by e1000e_dump().  Iteration stops at the
 * zero-filled terminator entry (reginfo->name == NULL).
 */
static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+       int n = 0;
+       char rname[16];
+       u32 regs[8];
+
+       switch (reginfo->ofs) {
+       case E1000_RXDCTL(0):
+               for (n = 0; n < 2; n++)
+                       regs[n] = __er32(hw, E1000_RXDCTL(n));
+               break;
+       case E1000_TXDCTL(0):
+               for (n = 0; n < 2; n++)
+                       regs[n] = __er32(hw, E1000_TXDCTL(n));
+               break;
+       case E1000_TARC(0):
+               for (n = 0; n < 2; n++)
+                       regs[n] = __er32(hw, E1000_TARC(n));
+               break;
+       default:
+               printk(KERN_INFO "%-15s %08x\n",
+                       reginfo->name, __er32(hw, reginfo->ofs));
+               return;
+       }
+
+       snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+       printk(KERN_INFO "%-15s ", rname);
+       for (n = 0; n < 2; n++)
+               printk(KERN_CONT "%08x ", regs[n]);
+       printk(KERN_CONT "\n");
+}
+
+
/*
 * e1000e_dump - Print registers, tx-ring and rx-ring
 * @adapter: board private structure
 *
 * Debug-only dump gated on the adapter's message level:
 *  - netif_msg_hw() must be set or nothing is printed;
 *  - the Tx descriptor ring is dumped only with netif_msg_tx_done();
 *  - the Rx descriptor ring is dumped only with netif_msg_rx_status();
 *  - packet payload hex dumps additionally require netif_msg_pktdata().
 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	/* local overlay structs used to print raw descriptors as u64 words */
	struct my_u0 { u64 a; u64 b; } *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	struct e1000_rx_desc *rx_desc;
	struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	/* cast drops the table's const; e1000_regdump() never writes it */
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
		0, tx_ring->next_to_use, tx_ring->next_to_clean,
		(u64)buffer_info->dma,
		buffer_info->length,
		buffer_info->next_to_watch,
		(u64)buffer_info->time_stamp);

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
		" [bi->dma       ] leng  ntw timestamp        bi->skb "
		"<-- Legacy format\n");
	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
		" [bi->dma       ] leng  ntw timestamp        bi->skb "
		"<-- Ext Context format\n");
	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
		" [bi->dma       ] leng  ntw timestamp        bi->skb "
		"<-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		/* bit 29 (DEXT) selects legacy vs extended; bit 20 (DTYP)
		 * distinguishes extended data from extended context */
		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
			"%04X  %3X %016llX %p",
		       (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
			((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
		       le64_to_cpu(u0->a), le64_to_cpu(u0->b),
		       (u64)buffer_info->dma, buffer_info->length,
		       buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
		       buffer_info->skb);
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC/U\n");
		else if (i == tx_ring->next_to_use)
			printk(KERN_CONT " NTU\n");
		else if (i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC\n");
		else
			printk(KERN_CONT "\n");

		/* NOTE(review): phys_to_virt() on a DMA handle assumes a
		 * direct (non-IOMMU) mapping; debug-only use — confirm */
		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	printk(KERN_INFO " %5d %5X %5X\n", 0,
		rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
	/* descriptor layout depends on whether packet split is in use */
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
			"[buffer 1 63:0 ] "
		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
		       "[bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
			"[vl   l0 ee  es] "
		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
		       "[bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
				le32_to_cpu(rx_desc_ps->wb.middle.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done: hardware has written the
				 * descriptor back, so print write-back form */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX %016llX %016llX "
					"---------------- %p", i,
					le64_to_cpu(u1->a),
					le64_to_cpu(u1->b),
					le64_to_cpu(u1->c),
					le64_to_cpu(u1->d),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %016llX %016llX %p", i,
					le64_to_cpu(u1->a),
					le64_to_cpu(u1->b),
					le64_to_cpu(u1->c),
					le64_to_cpu(u1->d),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						phys_to_virt(buffer_info->dma),
						adapter->rx_ps_bsize0, true);
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
		break;
	default:
	case 0:
		/* Legacy Receive Descriptor Format
		 *
		 * +-----------------------------------------------------+
		 * |                Buffer Address [63:0]                |
		 * +-----------------------------------------------------+
		 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
		 * +-----------------------------------------------------+
		 * 63       48 47    40 39      32 31         16 15      0
		 */
		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
			"[vl er S cks ln] [bi->dma       ] [bi->skb] "
			"<-- Legacy format\n");
		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
			rx_desc = E1000_RX_DESC(*rx_ring, i);
			buffer_info = &rx_ring->buffer_info[i];
			u0 = (struct my_u0 *)rx_desc;
			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
				"%016llX %p",
				i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
				(u64)buffer_info->dma, buffer_info->skb);
			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter))
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					adapter->rx_buffer_len, true);
		}
	}

exit:
	return;
}
+
 /**
  * e1000_desc_unused - calculate if we have unused descriptors
  **/
@@ -177,10 +536,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
                buffer_info->skb = skb;
 map_skb:
-               buffer_info->dma = pci_map_single(pdev, skb->data,
+               buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                                  adapter->rx_buffer_len,
-                                                 PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+                                                 DMA_FROM_DEVICE);
+               if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        break;
@@ -189,26 +548,23 @@ map_skb:
                rx_desc = E1000_RX_DESC(*rx_ring, i);
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
+               if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+                       /*
+                        * Force memory writes to complete before letting h/w
+                        * know there are new descriptors to fetch.  (Only
+                        * applicable for weak-ordered memory model archs,
+                        * such as IA-64).
+                        */
+                       wmb();
+                       writel(i, adapter->hw.hw_addr + rx_ring->tail);
+               }
                i++;
                if (i == rx_ring->count)
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }
 
-       if (rx_ring->next_to_use != i) {
-               rx_ring->next_to_use = i;
-               if (i-- == 0)
-                       i = (rx_ring->count - 1);
-
-               /*
-                * Force memory writes to complete before letting h/w
-                * know there are new descriptors to fetch.  (Only
-                * applicable for weak-ordered memory model archs,
-                * such as IA-64).
-                */
-               wmb();
-               writel(i, adapter->hw.hw_addr + rx_ring->tail);
-       }
+       rx_ring->next_to_use = i;
 }
 
 /**
@@ -246,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                        adapter->alloc_rx_buff_failed++;
                                        goto no_buffers;
                                }
-                               ps_page->dma = pci_map_page(pdev,
-                                                  ps_page->page,
-                                                  0, PAGE_SIZE,
-                                                  PCI_DMA_FROMDEVICE);
-                               if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+                               ps_page->dma = dma_map_page(&pdev->dev,
+                                                           ps_page->page,
+                                                           0, PAGE_SIZE,
+                                                           DMA_FROM_DEVICE);
+                               if (dma_mapping_error(&pdev->dev,
+                                                     ps_page->dma)) {
                                        dev_err(&adapter->pdev->dev,
                                          "RX DMA page map failed\n");
                                        adapter->rx_dma_failed++;
@@ -275,10 +632,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                }
 
                buffer_info->skb = skb;
-               buffer_info->dma = pci_map_single(pdev, skb->data,
+               buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                                  adapter->rx_ps_bsize0,
-                                                 PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+                                                 DMA_FROM_DEVICE);
+               if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        /* cleanup skb */
@@ -289,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
                rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
+               if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+                       /*
+                        * Force memory writes to complete before letting h/w
+                        * know there are new descriptors to fetch.  (Only
+                        * applicable for weak-ordered memory model archs,
+                        * such as IA-64).
+                        */
+                       wmb();
+                       writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+               }
+
                i++;
                if (i == rx_ring->count)
                        i = 0;
@@ -296,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
        }
 
 no_buffers:
-       if (rx_ring->next_to_use != i) {
-               rx_ring->next_to_use = i;
-
-               if (!(i--))
-                       i = (rx_ring->count - 1);
-
-               /*
-                * Force memory writes to complete before letting h/w
-                * know there are new descriptors to fetch.  (Only
-                * applicable for weak-ordered memory model archs,
-                * such as IA-64).
-                */
-               wmb();
-               /*
-                * Hardware increments by 16 bytes, but packet split
-                * descriptors are 32 bytes...so we increment tail
-                * twice as much.
-                */
-               writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
-       }
+       rx_ring->next_to_use = i;
 }
 
 /**
@@ -365,10 +714,10 @@ check_page:
                }
 
                if (!buffer_info->dma)
-                       buffer_info->dma = pci_map_page(pdev,
+                       buffer_info->dma = dma_map_page(&pdev->dev,
                                                        buffer_info->page, 0,
                                                        PAGE_SIZE,
-                                                       PCI_DMA_FROMDEVICE);
+                                                       DMA_FROM_DEVICE);
 
                rx_desc = E1000_RX_DESC(*rx_ring, i);
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -442,21 +791,31 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
                cleaned = 1;
                cleaned_count++;
-               pci_unmap_single(pdev,
+               dma_unmap_single(&pdev->dev,
                                 buffer_info->dma,
                                 adapter->rx_buffer_len,
-                                PCI_DMA_FROMDEVICE);
+                                DMA_FROM_DEVICE);
                buffer_info->dma = 0;
 
                length = le16_to_cpu(rx_desc->length);
 
-               /* !EOP means multiple descriptors were used to store a single
-                * packet, also make sure the frame isn't just CRC only */
-               if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+               /*
+                * !EOP means multiple descriptors were used to store a single
+                * packet, if that's the case we need to toss it.  In fact, we
+                * need to toss every packet with the EOP bit clear and the
+                * next frame that _does_ have the EOP bit set, as it is by
+                * definition only a frame fragment
+                */
+               if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+                       adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+               if (adapter->flags2 & FLAG2_IS_DISCARDING) {
                        /* All receives must fit into a single buffer */
                        e_dbg("Receive packet consumed multiple buffers\n");
                        /* recycle */
                        buffer_info->skb = skb;
+                       if (status & E1000_RXD_STAT_EOP)
+                               adapter->flags2 &= ~FLAG2_IS_DISCARDING;
                        goto next_desc;
                }
 
@@ -534,10 +893,16 @@ next_desc:
 static void e1000_put_txbuf(struct e1000_adapter *adapter,
                             struct e1000_buffer *buffer_info)
 {
-       buffer_info->dma = 0;
+       if (buffer_info->dma) {
+               if (buffer_info->mapped_as_page)
+                       dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+                                      buffer_info->length, DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+                                        buffer_info->length, DMA_TO_DEVICE);
+               buffer_info->dma = 0;
+       }
        if (buffer_info->skb) {
-               skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
-                             DMA_TO_DEVICE);
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
@@ -625,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                        cleaned = (i == eop);
 
                        if (cleaned) {
-                               struct sk_buff *skb = buffer_info->skb;
-                               unsigned int segs, bytecount;
-                               segs = skb_shinfo(skb)->gso_segs ?: 1;
-                               /* multiply data chunks by size of headers */
-                               bytecount = ((segs - 1) * skb_headlen(skb)) +
-                                           skb->len;
-                               total_tx_packets += segs;
-                               total_tx_bytes += bytecount;
+                               total_tx_packets += buffer_info->segs;
+                               total_tx_bytes += buffer_info->bytecount;
                        }
 
                        e1000_put_txbuf(adapter, buffer_info);
@@ -643,6 +1002,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                                i = 0;
                }
 
+               if (i == tx_ring->next_to_use)
+                       break;
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }
@@ -672,8 +1033,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                adapter->detect_tx_hung = 0;
                if (tx_ring->buffer_info[i].time_stamp &&
                    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
-                              + (adapter->tx_timeout_factor * HZ))
-                   && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+                              + (adapter->tx_timeout_factor * HZ)) &&
+                   !(er32(STATUS) & E1000_STATUS_TXOFF)) {
                        schedule_work(&adapter->print_hang_task);
                        netif_stop_queue(netdev);
                }
@@ -733,15 +1094,21 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 
                cleaned = 1;
                cleaned_count++;
-               pci_unmap_single(pdev, buffer_info->dma,
+               dma_unmap_single(&pdev->dev, buffer_info->dma,
                                 adapter->rx_ps_bsize0,
-                                PCI_DMA_FROMDEVICE);
+                                DMA_FROM_DEVICE);
                buffer_info->dma = 0;
 
-               if (!(staterr & E1000_RXD_STAT_EOP)) {
+               /* see !EOP comment in other rx routine */
+               if (!(staterr & E1000_RXD_STAT_EOP))
+                       adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+               if (adapter->flags2 & FLAG2_IS_DISCARDING) {
                        e_dbg("Packet Split buffers didn't pick up the full "
                              "packet\n");
                        dev_kfree_skb_irq(skb);
+                       if (staterr & E1000_RXD_STAT_EOP)
+                               adapter->flags2 &= ~FLAG2_IS_DISCARDING;
                        goto next_desc;
                }
 
@@ -785,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                         * kmap_atomic, so we can't hold the mapping
                         * very long
                         */
-                       pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
-                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+                                               PAGE_SIZE, DMA_FROM_DEVICE);
                        vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
                        memcpy(skb_tail_pointer(skb), vaddr, l1);
                        kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-                       pci_dma_sync_single_for_device(pdev, ps_page->dma,
-                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+                                                  PAGE_SIZE, DMA_FROM_DEVICE);
 
                        /* remove the CRC */
                        if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -808,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                break;
 
                        ps_page = &buffer_info->ps_pages[j];
-                       pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
-                                      PCI_DMA_FROMDEVICE);
+                       dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+                                      DMA_FROM_DEVICE);
                        ps_page->dma = 0;
                        skb_fill_page_desc(skb, j, ps_page->page, 0, length);
                        ps_page->page = NULL;
@@ -927,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 
                cleaned = true;
                cleaned_count++;
-               pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
-                              PCI_DMA_FROMDEVICE);
+               dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+                              DMA_FROM_DEVICE);
                buffer_info->dma = 0;
 
                length = le16_to_cpu(rx_desc->length);
@@ -1064,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
                        if (adapter->clean_rx == e1000_clean_rx_irq)
-                               pci_unmap_single(pdev, buffer_info->dma,
+                               dma_unmap_single(&pdev->dev, buffer_info->dma,
                                                 adapter->rx_buffer_len,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                        else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
-                               pci_unmap_page(pdev, buffer_info->dma,
+                               dma_unmap_page(&pdev->dev, buffer_info->dma,
                                               PAGE_SIZE,
-                                              PCI_DMA_FROMDEVICE);
+                                              DMA_FROM_DEVICE);
                        else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
-                               pci_unmap_single(pdev, buffer_info->dma,
+                               dma_unmap_single(&pdev->dev, buffer_info->dma,
                                                 adapter->rx_ps_bsize0,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                }
 
@@ -1092,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
                        ps_page = &buffer_info->ps_pages[j];
                        if (!ps_page->page)
                                break;
-                       pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
-                                      PCI_DMA_FROMDEVICE);
+                       dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+                                      DMA_FROM_DEVICE);
                        ps_page->dma = 0;
                        put_page(ps_page->page);
                        ps_page->page = NULL;
@@ -1111,6 +1478,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
+       adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 
        writel(0, adapter->hw.hw_addr + rx_ring->head);
        writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -1399,8 +1767,6 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~FLAG_MSI_ENABLED;
        }
-
-       return;
 }
 
 /**
@@ -1452,8 +1818,6 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                /* Don't do anything; this is the system default */
                break;
        }
-
-       return;
 }
 
 /**
@@ -2158,10 +2522,10 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
        }
 }
 
-static void e1000_init_manageability(struct e1000_adapter *adapter)
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u32 manc, manc2h;
+       u32 manc, manc2h, mdef, i, j;
 
        if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
                return;
@@ -2175,10 +2539,49 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
         */
        manc |= E1000_MANC_EN_MNG2HOST;
        manc2h = er32(MANC2H);
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
-       manc2h |= E1000_MNG2HOST_PORT_623;
-       manc2h |= E1000_MNG2HOST_PORT_664;
+
+       switch (hw->mac.type) {
+       default:
+               manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+               break;
+       case e1000_82574:
+       case e1000_82583:
+               /*
+                * Check if IPMI pass-through decision filter already exists;
+                * if so, enable it.
+                */
+               for (i = 0, j = 0; i < 8; i++) {
+                       mdef = er32(MDEF(i));
+
+                       /* Ignore filters with anything other than IPMI ports */
+                       if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+                               continue;
+
+                       /* Enable this decision filter in MANC2H */
+                       if (mdef)
+                               manc2h |= (1 << i);
+
+                       j |= mdef;
+               }
+
+               if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+                       break;
+
+               /* Create new decision filter in an empty filter */
+               for (i = 0, j = 0; i < 8; i++)
+                       if (er32(MDEF(i)) == 0) {
+                               ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+                                              E1000_MDEF_PORT_664));
+                               manc2h |= (1 << i);
+                               j++;
+                               break;
+                       }
+
+               if (!j)
+                       e_warn("Unable to create IPMI pass-through filter\n");
+               break;
+       }
+
        ew32(MANC2H, manc2h);
        ew32(MANC, manc);
 }
@@ -2265,8 +2668,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
        ew32(TCTL, tctl);
 
        e1000e_config_collision_dist(hw);
-
-       adapter->tx_queue_len = adapter->netdev->tx_queue_len;
 }
 
 /**
@@ -2326,18 +2727,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
        rctl &= ~E1000_RCTL_SZ_4096;
        rctl |= E1000_RCTL_BSEX;
        switch (adapter->rx_buffer_len) {
-       case 256:
-               rctl |= E1000_RCTL_SZ_256;
-               rctl &= ~E1000_RCTL_BSEX;
-               break;
-       case 512:
-               rctl |= E1000_RCTL_SZ_512;
-               rctl &= ~E1000_RCTL_BSEX;
-               break;
-       case 1024:
-               rctl |= E1000_RCTL_SZ_1024;
-               rctl &= ~E1000_RCTL_BSEX;
-               break;
        case 2048:
        default:
                rctl |= E1000_RCTL_SZ_2048;
@@ -2511,12 +2900,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
                         * excessive C-state transition latencies result in
                         * dropped transactions.
                         */
-                       pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-                                                 adapter->netdev->name, 55);
+                       pm_qos_update_request(
+                               adapter->netdev->pm_qos_req, 55);
                } else {
-                       pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-                                                 adapter->netdev->name,
-                                                 PM_QOS_DEFAULT_VALUE);
+                       pm_qos_update_request(
+                               adapter->netdev->pm_qos_req,
+                               PM_QOS_DEFAULT_VALUE);
                }
        }
 
@@ -2529,22 +2918,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
  *  @hw: pointer to the HW structure
  *  @mc_addr_list: array of multicast addresses to program
  *  @mc_addr_count: number of multicast addresses to program
- *  @rar_used_count: the first RAR register free to program
- *  @rar_count: total number of supported Receive Address Registers
  *
- *  Updates the Receive Address Registers and Multicast Table Array.
+ *  Updates the Multicast Table Array.
  *  The caller must have a packed mc_addr_list of multicast addresses.
- *  The parameter rar_count will usually be hw->mac.rar_entry_count
- *  unless there are workarounds that change this.  Currently no func pointer
- *  exists and all implementations are handled in the generic version of this
- *  function.
  **/
 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
-                                     u32 mc_addr_count, u32 rar_used_count,
-                                     u32 rar_count)
+                                     u32 mc_addr_count)
 {
-       hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
-                                       rar_used_count, rar_count);
+       hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
 }
 
 /**
@@ -2560,8 +2941,7 @@ static void e1000_set_multi(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       struct e1000_mac_info *mac = &hw->mac;
-       struct dev_mc_list *mc_ptr;
+       struct netdev_hw_addr *ha;
        u8  *mta_list;
        u32 rctl;
        int i;
@@ -2586,31 +2966,24 @@ static void e1000_set_multi(struct net_device *netdev)
 
        ew32(RCTL, rctl);
 
-       if (netdev->mc_count) {
-               mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+       if (!netdev_mc_empty(netdev)) {
+               mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
                if (!mta_list)
                        return;
 
                /* prepare a packed array of only addresses. */
-               mc_ptr = netdev->mc_list;
-
-               for (i = 0; i < netdev->mc_count; i++) {
-                       if (!mc_ptr)
-                               break;
-                       memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
-                              ETH_ALEN);
-                       mc_ptr = mc_ptr->next;
-               }
+               i = 0;
+               netdev_for_each_mc_addr(ha, netdev)
+                       memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
-               e1000_update_mc_addr_list(hw, mta_list, i, 1,
-                                         mac->rar_entry_count);
+               e1000_update_mc_addr_list(hw, mta_list, i);
                kfree(mta_list);
        } else {
                /*
                 * if we're called from probe, we might not have
                 * anything to do here, so clear out the list
                 */
-               e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
+               e1000_update_mc_addr_list(hw, NULL, 0);
        }
 }
 
@@ -2623,7 +2996,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
        e1000_set_multi(adapter->netdev);
 
        e1000_restore_vlan(adapter);
-       e1000_init_manageability(adapter);
+       e1000_init_manageability_pt(adapter);
 
        e1000_configure_tx(adapter);
        e1000_setup_rctl(adapter);
@@ -2757,6 +3130,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
                        fc->high_water = 0x5000;
                        fc->low_water  = 0x3000;
                }
+               fc->refresh_time = 0x1000;
        } else {
                if ((adapter->flags & FLAG_HAS_ERT) &&
                    (adapter->netdev->mtu > ETH_DATA_LEN))
@@ -2794,10 +3168,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
        if (mac->ops.init_hw(hw))
                e_err("Hardware Error\n");
 
-       /* additional part of the flow-control workaround above */
-       if (hw->mac.type == e1000_pchlan)
-               ew32(FCRTV_PCH, 0x1000);
-
        e1000_update_mng_vlan(adapter);
 
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -2826,8 +3196,8 @@ int e1000e_up(struct e1000_adapter *adapter)
 
        /* DMA latency requirement to workaround early-receive/jumbo issue */
        if (adapter->flags & FLAG_HAS_ERT)
-               pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
-                                      adapter->netdev->name,
+               adapter->netdev->pm_qos_req =
+                       pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
                                       PM_QOS_DEFAULT_VALUE);
 
        /* hardware has been reset, we need to reload some things */
@@ -2843,7 +3213,11 @@ int e1000e_up(struct e1000_adapter *adapter)
        netif_wake_queue(adapter->netdev);
 
        /* fire a link change interrupt to start the watchdog */
-       ew32(ICS, E1000_ICS_LSC);
+       if (adapter->msix_entries)
+               ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+       else
+               ew32(ICS, E1000_ICS_LSC);
+
        return 0;
 }
 
@@ -2880,7 +3254,6 @@ void e1000e_down(struct e1000_adapter *adapter)
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
 
-       netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
@@ -2890,9 +3263,11 @@ void e1000e_down(struct e1000_adapter *adapter)
        e1000_clean_tx_ring(adapter);
        e1000_clean_rx_ring(adapter);
 
-       if (adapter->flags & FLAG_HAS_ERT)
-               pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
-                                         adapter->netdev->name);
+       if (adapter->flags & FLAG_HAS_ERT) {
+               pm_qos_remove_request(
+                             adapter->netdev->pm_qos_req);
+               adapter->netdev->pm_qos_req = NULL;
+       }
 
        /*
         * TODO: for power management, we could drop the link and
@@ -3086,12 +3461,15 @@ static int e1000_open(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       struct pci_dev *pdev = adapter->pdev;
        int err;
 
        /* disallow open during test */
        if (test_bit(__E1000_TESTING, &adapter->state))
                return -EBUSY;
 
+       pm_runtime_get_sync(&pdev->dev);
+
        netif_carrier_off(netdev);
 
        /* allocate transmit descriptors */
@@ -3104,6 +3482,15 @@ static int e1000_open(struct net_device *netdev)
        if (err)
                goto err_setup_rx;
 
+       /*
+        * If AMT is enabled, let the firmware know that the network
+        * interface is now open and reset the part to a known state.
+        */
+       if (adapter->flags & FLAG_HAS_AMT) {
+               e1000_get_hw_control(adapter);
+               e1000e_reset(adapter);
+       }
+
        e1000e_power_up_phy(adapter);
 
        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -3112,13 +3499,6 @@ static int e1000_open(struct net_device *netdev)
                e1000_update_mng_vlan(adapter);
 
        /*
-        * If AMT is enabled, let the firmware know that the network
-        * interface is now open
-        */
-       if (adapter->flags & FLAG_HAS_AMT)
-               e1000_get_hw_control(adapter);
-
-       /*
         * before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
@@ -3152,8 +3532,14 @@ static int e1000_open(struct net_device *netdev)
 
        netif_start_queue(netdev);
 
+       adapter->idle_check = true;
+       pm_runtime_put(&pdev->dev);
+
        /* fire a link status change interrupt to start the watchdog */
-       ew32(ICS, E1000_ICS_LSC);
+       if (adapter->msix_entries)
+               ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+       else
+               ew32(ICS, E1000_ICS_LSC);
 
        return 0;
 
@@ -3165,6 +3551,7 @@ err_setup_rx:
        e1000e_free_tx_resources(adapter);
 err_setup_tx:
        e1000e_reset(adapter);
+       pm_runtime_put_sync(&pdev->dev);
 
        return err;
 }
@@ -3183,11 +3570,17 @@ err_setup_tx:
 static int e1000_close(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct pci_dev *pdev = adapter->pdev;
 
        WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
-       e1000e_down(adapter);
+
+       pm_runtime_get_sync(&pdev->dev);
+
+       if (!test_bit(__E1000_DOWN, &adapter->state)) {
+               e1000e_down(adapter);
+               e1000_free_irq(adapter);
+       }
        e1000_power_down_phy(adapter);
-       e1000_free_irq(adapter);
 
        e1000e_free_tx_resources(adapter);
        e1000e_free_rx_resources(adapter);
@@ -3209,6 +3602,8 @@ static int e1000_close(struct net_device *netdev)
        if (adapter->flags & FLAG_HAS_AMT)
                e1000_release_hw_control(adapter);
 
+       pm_runtime_put_sync(&pdev->dev);
+
        return 0;
 }
 /**
@@ -3308,24 +3703,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
        if ((hw->phy.type == e1000_phy_82578) ||
            (hw->phy.type == e1000_phy_82577)) {
                e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
-               e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
-               adapter->stats.scc += phy_data;
+               if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
+                       adapter->stats.scc += phy_data;
 
                e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
-               e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
-               adapter->stats.ecol += phy_data;
+               if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
+                       adapter->stats.ecol += phy_data;
 
                e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
-               e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
-               adapter->stats.mcc += phy_data;
+               if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
+                       adapter->stats.mcc += phy_data;
 
                e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
-               e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
-               adapter->stats.latecol += phy_data;
+               if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
+                       adapter->stats.latecol += phy_data;
 
                e1e_rphy(hw, HV_DC_UPPER, &phy_data);
-               e1e_rphy(hw, HV_DC_LOWER, &phy_data);
-               adapter->stats.dc += phy_data;
+               if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
+                       adapter->stats.dc += phy_data;
        } else {
                adapter->stats.scc += er32(SCC);
                adapter->stats.ecol += er32(ECOL);
@@ -3353,8 +3748,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
        if ((hw->phy.type == e1000_phy_82578) ||
            (hw->phy.type == e1000_phy_82577)) {
                e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
-               e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
-               hw->mac.collision_delta = phy_data;
+               if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
+                       hw->mac.collision_delta = phy_data;
        } else {
                hw->mac.collision_delta = er32(COLC);
        }
@@ -3365,8 +3760,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
        if ((hw->phy.type == e1000_phy_82578) ||
            (hw->phy.type == e1000_phy_82577)) {
                e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
-               e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
-               adapter->stats.tncrs += phy_data;
+               if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
+                       adapter->stats.tncrs += phy_data;
        } else {
                if ((hw->mac.type != e1000_82574) &&
                    (hw->mac.type != e1000_82583))
@@ -3470,7 +3865,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
               ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
 }
 
-bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000e_has_link(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        bool link_active = 0;
@@ -3551,8 +3946,11 @@ static void e1000_watchdog_task(struct work_struct *work)
        u32 link, tctl;
        int tx_pending = 0;
 
-       link = e1000_has_link(adapter);
+       link = e1000e_has_link(adapter);
        if ((netif_carrier_ok(netdev)) && link) {
+               /* Cancel scheduled suspend requests. */
+               pm_runtime_resume(netdev->dev.parent);
+
                e1000e_enable_receives(adapter);
                goto link_up;
        }
@@ -3564,6 +3962,10 @@ static void e1000_watchdog_task(struct work_struct *work)
        if (link) {
                if (!netif_carrier_ok(netdev)) {
                        bool txb2b = 1;
+
+                       /* Cancel scheduled suspend requests. */
+                       pm_runtime_resume(netdev->dev.parent);
+
                        /* update snapshot of PHY registers on LSC */
                        e1000_phy_read_status(adapter);
                        mac->ops.get_link_up_info(&adapter->hw,
@@ -3591,21 +3993,15 @@ static void e1000_watchdog_task(struct work_struct *work)
                                               "link gets many collisions.\n");
                        }
 
-                       /*
-                        * tweak tx_queue_len according to speed/duplex
-                        * and adjust the timeout factor
-                        */
-                       netdev->tx_queue_len = adapter->tx_queue_len;
+                       /* adjust timeout factor according to speed/duplex */
                        adapter->tx_timeout_factor = 1;
                        switch (adapter->link_speed) {
                        case SPEED_10:
                                txb2b = 0;
-                               netdev->tx_queue_len = 10;
                                adapter->tx_timeout_factor = 16;
                                break;
                        case SPEED_100:
                                txb2b = 0;
-                               netdev->tx_queue_len = 100;
                                adapter->tx_timeout_factor = 10;
                                break;
                        }
@@ -3679,6 +4075,9 @@ static void e1000_watchdog_task(struct work_struct *work)
 
                        if (adapter->flags & FLAG_RX_NEEDS_RESTART)
                                schedule_work(&adapter->reset_task);
+                       else
+                               pm_schedule_suspend(netdev->dev.parent,
+                                                       LINK_TIMEOUT);
                }
        }
 
@@ -3714,6 +4113,22 @@ link_up:
                }
        }
 
+       /* Simple mode for Interrupt Throttle Rate (ITR) */
+       if (adapter->itr_setting == 4) {
+               /*
+                * Symmetric Tx/Rx gets a reduced ITR=2000;
+                * Total asymmetrical Tx or Rx gets ITR=8000;
+                * everyone else is between 2000-8000.
+                */
+               u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+               u32 dif = (adapter->gotc > adapter->gorc ?
+                           adapter->gotc - adapter->gorc :
+                           adapter->gorc - adapter->gotc) / 10000;
+               u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+               ew32(ITR, 1000000000 / (itr * 256));
+       }
+
        /* Cause software interrupt to ensure Rx ring is cleaned */
        if (adapter->msix_entries)
                ew32(ICS, adapter->rx_ring->ims_val);
@@ -3774,7 +4189,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
                                                         0, IPPROTO_TCP, 0);
                cmd_length = E1000_TXD_CMD_IP;
                ipcse = skb_transport_offset(skb) - 1;
-       } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+       } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                       &ipv6_hdr(skb)->daddr,
@@ -3884,23 +4299,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                        unsigned int mss)
 {
        struct e1000_ring *tx_ring = adapter->tx_ring;
+       struct pci_dev *pdev = adapter->pdev;
        struct e1000_buffer *buffer_info;
        unsigned int len = skb_headlen(skb);
-       unsigned int offset, size, count = 0, i;
-       unsigned int f;
-       dma_addr_t *map;
+       unsigned int offset = 0, size, count = 0, i;
+       unsigned int f, bytecount, segs;
 
        i = tx_ring->next_to_use;
 
-       if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-               dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-               adapter->tx_dma_failed++;
-               return 0;
-       }
-
-       map = skb_shinfo(skb)->dma_maps;
-       offset = 0;
-
        while (len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, max_per_txd);
@@ -3908,11 +4314,16 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                buffer_info->length = size;
                buffer_info->time_stamp = jiffies;
                buffer_info->next_to_watch = i;
-               buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
-               count++;
+               buffer_info->dma = dma_map_single(&pdev->dev,
+                                                 skb->data + offset,
+                                                 size, DMA_TO_DEVICE);
+               buffer_info->mapped_as_page = false;
+               if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+                       goto dma_error;
 
                len -= size;
                offset += size;
+               count++;
 
                if (len) {
                        i++;
@@ -3926,7 +4337,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 
                frag = &skb_shinfo(skb)->frags[f];
                len = frag->size;
-               offset = 0;
+               offset = frag->page_offset;
 
                while (len) {
                        i++;
@@ -3939,7 +4350,12 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                        buffer_info->length = size;
                        buffer_info->time_stamp = jiffies;
                        buffer_info->next_to_watch = i;
-                       buffer_info->dma = map[f] + offset;
+                       buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
+                                                       offset, size,
+                                                       DMA_TO_DEVICE);
+                       buffer_info->mapped_as_page = true;
+                       if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+                               goto dma_error;
 
                        len -= size;
                        offset += size;
@@ -3947,10 +4363,32 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                }
        }
 
+       segs = skb_shinfo(skb)->gso_segs ?: 1;
+       /* multiply data chunks by size of headers */
+       bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
        tx_ring->buffer_info[i].skb = skb;
+       tx_ring->buffer_info[i].segs = segs;
+       tx_ring->buffer_info[i].bytecount = bytecount;
        tx_ring->buffer_info[first].next_to_watch = i;
 
        return count;
+
+dma_error:
+       dev_err(&pdev->dev, "TX DMA map failed\n");
+       buffer_info->dma = 0;
+       if (count)
+               count--;
+
+       while (count--) {
+               if (i == 0)
+                       i += tx_ring->count;
+               i--;
+               buffer_info = &tx_ring->buffer_info[i];
+               e1000_put_txbuf(adapter, buffer_info);
+       }
+
+       return 0;
 }
 
 static void e1000_tx_queue(struct e1000_adapter *adapter,
@@ -4023,8 +4461,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
        u16 length, offset;
 
        if (vlan_tx_tag_present(skb)) {
-               if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
-                   && (adapter->hw.mng_cookie.status &
+               if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+                   (adapter->hw.mng_cookie.status &
                        E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
                        return 0;
        }
@@ -4098,7 +4536,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        unsigned int max_per_txd = E1000_MAX_PER_TXD;
        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int tx_flags = 0;
-       unsigned int len = skb->len - skb->data_len;
+       unsigned int len = skb_headlen(skb);
        unsigned int nr_frags;
        unsigned int mss;
        int count = 0;
@@ -4148,7 +4586,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
-                       len = skb->len - skb->data_len;
+                       len = skb_headlen(skb);
                }
        }
 
@@ -4234,6 +4672,8 @@ static void e1000_reset_task(struct work_struct *work)
        struct e1000_adapter *adapter;
        adapter = container_of(work, struct e1000_adapter, reset_task);
 
+       e1000e_dump(adapter);
+       e_err("Reset adapter\n");
        e1000e_reinit_locked(adapter);
 }
 
@@ -4276,6 +4716,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                return -EINVAL;
        }
 
+       /* 82573 Errata 17 */
+       if (((adapter->hw.mac.type == e1000_82573) ||
+            (adapter->hw.mac.type == e1000_82574)) &&
+           (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
+               adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
+               e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
+       }
+
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                msleep(1);
        /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -4294,13 +4742,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
         * fragmented skbs
         */
 
-       if (max_frame <= 256)
-               adapter->rx_buffer_len = 256;
-       else if (max_frame <= 512)
-               adapter->rx_buffer_len = 512;
-       else if (max_frame <= 1024)
-               adapter->rx_buffer_len = 1024;
-       else if (max_frame <= 2048)
+       if (max_frame <= 2048)
                adapter->rx_buffer_len = 2048;
        else
                adapter->rx_buffer_len = 4096;
@@ -4466,13 +4908,15 @@ out:
        return retval;
 }
 
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+                           bool runtime)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, ctrl_ext, rctl, status;
-       u32 wufc = adapter->wol;
+       /* Runtime suspend should only enable wakeup for link changes */
+       u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
        int retval = 0;
 
        netif_device_detach(netdev);
@@ -4518,7 +4962,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
                    e1000_media_type_internal_serdes) {
                        /* keep the laser running in D3 */
                        ctrl_ext = er32(CTRL_EXT);
-                       ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+                       ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
                        ew32(CTRL_EXT, ctrl_ext);
                }
 
@@ -4604,45 +5048,51 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
        }
 }
 
-static void e1000e_disable_l1aspm(struct pci_dev *pdev)
+#ifdef CONFIG_PCIEASPM
+/* CONFIG_PCIEASPM build: let the PCI core clear the requested link state(s). */
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+       pci_disable_link_state(pdev, state);
+}
+#else
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 {
        int pos;
-       u16 val;
+       u16 reg16;
 
        /*
-        * 82573 workaround - disable L1 ASPM on mobile chipsets
-        *
-        * L1 ASPM on various mobile (ich7) chipsets do not behave properly
-        * resulting in lost data or garbage information on the pci-e link
-        * level. This could result in (false) bad EEPROM checksum errors,
-        * long ping times (up to 2s) or even a system freeze/hang.
-        *
-        * Unfortunately this feature saves about 1W power consumption when
-        * active.
+        * Both device and parent should have the same ASPM setting.
+        * Disable ASPM in downstream component first and then upstream.
         */
-       pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-       pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
-       if (val & 0x2) {
-               dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
-               val &= ~0x2;
-               pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
-       }
-}
+       pos = pci_pcie_cap(pdev);
+       pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+       reg16 &= ~state;
+       pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
 
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+       if (!pdev->bus->self)
+               return;
+
+       pos = pci_pcie_cap(pdev->bus->self);
+       pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
+       reg16 &= ~state;
+       pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+}
+#endif
+void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 {
-       int retval;
-       bool wake;
+       dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+                (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
+                (state & PCIE_LINK_STATE_L1) ? "L1" : "");
 
-       retval = __e1000_shutdown(pdev, &wake);
-       if (!retval)
-               e1000_complete_shutdown(pdev, true, wake);
+       __e1000e_disable_aspm(pdev, state);
+}
 
-       return retval;
+#ifdef CONFIG_PM_OPS
+/*
+ * Gate for the PM callbacks below: true when the Tx ring's buffer_info
+ * array is non-NULL (i.e. the rings have been allocated; presumably set
+ * up in the open path — confirm against e1000_open/e1000e_setup_tx_resources).
+ */
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
+{
+       return !!adapter->tx_ring->buffer_info;
 }
 
-static int e1000_resume(struct pci_dev *pdev)
+static int __e1000_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4651,19 +5101,9 @@ static int e1000_resume(struct pci_dev *pdev)
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
-       e1000e_disable_l1aspm(pdev);
-
-       err = pci_enable_device_mem(pdev);
-       if (err) {
-               dev_err(&pdev->dev,
-                       "Cannot enable PCI device from suspend\n");
-               return err;
-       }
-
-       pci_set_master(pdev);
-
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_enable_wake(pdev, PCI_D3cold, 0);
+       pci_save_state(pdev);
+       if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+               e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
 
        e1000e_set_interrupt_capability(adapter);
        if (netif_running(netdev)) {
@@ -4705,7 +5145,7 @@ static int e1000_resume(struct pci_dev *pdev)
 
        e1000e_reset(adapter);
 
-       e1000_init_manageability(adapter);
+       e1000_init_manageability_pt(adapter);
 
        if (netif_running(netdev))
                e1000e_up(adapter);
@@ -4722,13 +5162,88 @@ static int e1000_resume(struct pci_dev *pdev)
 
        return 0;
 }
-#endif
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System-sleep suspend callback (dev_pm_ops).  Passes runtime=false to
+ * __e1000_shutdown() so the full user-configured WoL filter set
+ * (adapter->wol) is armed; on success the device is moved to its sleep
+ * power state by e1000_complete_shutdown().
+ */
+static int e1000_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int retval;
+       bool wake;
+
+       retval = __e1000_shutdown(pdev, &wake, false);
+       if (!retval)
+               e1000_complete_shutdown(pdev, true, wake);
+
+       return retval;
+}
+
+/*
+ * System-sleep resume callback (dev_pm_ops).  If the device was up before
+ * suspend, arm idle_check so the next runtime-idle pass (e1000_idle) will
+ * re-evaluate link state before scheduling a runtime suspend.
+ */
+static int e1000_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       if (e1000e_pm_ready(adapter))
+               adapter->idle_check = true;
+
+       return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Runtime-PM suspend callback.  runtime=true tells __e1000_shutdown() to
+ * enable wakeup only for link changes (E1000_WUFC_LNKC) rather than the
+ * full WoL filter set.  NOTE(review): __e1000_shutdown()'s return value is
+ * deliberately ignored here and 0 is always returned.
+ */
+static int e1000_runtime_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       if (e1000e_pm_ready(adapter)) {
+               bool wake;
+
+               __e1000_shutdown(pdev, &wake, true);
+       }
+
+       return 0;
+}
+
+/*
+ * Runtime-PM idle callback.  When the device is up this always returns
+ * -EBUSY to veto an immediate suspend; however, if idle_check was armed
+ * (by resume) and there is no link, a delayed runtime suspend is queued
+ * one second out via pm_schedule_suspend().
+ */
+static int e1000_idle(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       if (!e1000e_pm_ready(adapter))
+               return 0;
+
+       if (adapter->idle_check) {
+               adapter->idle_check = false;
+               /* only allow the deferred suspend when the link is down */
+               if (!e1000e_has_link(adapter))
+                       pm_schedule_suspend(dev, MSEC_PER_SEC);
+       }
+
+       return -EBUSY;
+}
+
+/*
+ * Runtime-PM resume callback.  idle_check is re-armed only when the user
+ * has disabled automatic runtime PM (runtime_auto false) — presumably so
+ * the idle path still re-checks the link in that case; confirm against
+ * the runtime PM core semantics of power.runtime_auto.
+ */
+static int e1000_runtime_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       if (!e1000e_pm_ready(adapter))
+               return 0;
+
+       adapter->idle_check = !dev->power.runtime_auto;
+       return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM_OPS */
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
        bool wake = false;
 
-       __e1000_shutdown(pdev, &wake);
+       __e1000_shutdown(pdev, &wake, false);
 
        if (system_state == SYSTEM_POWER_OFF)
                e1000_complete_shutdown(pdev, false, wake);
@@ -4793,7 +5308,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
        int err;
        pci_ers_result_t result;
 
-       e1000e_disable_l1aspm(pdev);
+       if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+               e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
@@ -4801,6 +5317,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
+               pdev->state_saved = true;
                pci_restore_state(pdev);
 
                pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4829,7 +5346,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       e1000_init_manageability(adapter);
+       e1000_init_manageability_pt(adapter);
 
        if (netif_running(netdev)) {
                if (e1000e_up(adapter)) {
@@ -4886,13 +5403,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
                dev_warn(&adapter->pdev->dev,
                         "Warning: detected DSPD enabled in EEPROM\n");
        }
-
-       ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
-       if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
-               /* ASPM enable */
-               dev_warn(&adapter->pdev->dev,
-                        "Warning: detected ASPM enabled in EEPROM\n");
-       }
 }
 
 static const struct net_device_ops e1000e_netdev_ops = {
@@ -4941,23 +5451,24 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        u16 eeprom_data = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
 
-       e1000e_disable_l1aspm(pdev);
+       if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
+               e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
 
        err = pci_enable_device_mem(pdev);
        if (err)
                return err;
 
        pci_using_dac = 0;
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                if (!err)
                        pci_using_dac = 1;
        } else {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = pci_set_consistent_dma_mask(pdev,
-                                                         DMA_BIT_MASK(32));
+                       err = dma_set_coherent_mask(&pdev->dev,
+                                                   DMA_BIT_MASK(32));
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
@@ -4988,6 +5499,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
+       netdev->irq = pdev->irq;
+
        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        hw = &adapter->hw;
@@ -5110,7 +5623,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        e1000_eeprom_checks(adapter);
 
-       /* copy the MAC address out of the NVM */
+       /* copy the MAC address */
        if (e1000e_read_mac_addr(&adapter->hw))
                e_err("NVM Read Error while reading MAC address\n");
 
@@ -5208,6 +5721,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        e1000_print_device_info(adapter);
 
+       if (pci_dev_run_wake(pdev)) {
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
+       pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+
        return 0;
 
 err_register:
@@ -5250,12 +5769,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+       pm_runtime_get_sync(&pdev->dev);
 
        /*
         * flush_scheduled work may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled
         */
-       set_bit(__E1000_DOWN, &adapter->state);
+       if (!down)
+               set_bit(__E1000_DOWN, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
 
@@ -5269,8 +5792,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
        if (!(netdev->flags & IFF_UP))
                e1000_power_down_phy(adapter);
 
+       /* Don't lie to e1000_close() down the road. */
+       if (!down)
+               clear_bit(__E1000_DOWN, &adapter->state);
        unregister_netdev(netdev);
 
+       if (pci_dev_run_wake(pdev)) {
+               pm_runtime_disable(&pdev->dev);
+               pm_runtime_set_suspended(&pdev->dev);
+       }
+       pm_runtime_put_noidle(&pdev->dev);
+
        /*
         * Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
@@ -5302,7 +5834,7 @@ static struct pci_error_handlers e1000_err_handler = {
        .resume = e1000_io_resume,
 };
 
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -5360,6 +5892,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
 
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
 
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
@@ -5370,16 +5903,22 @@ static struct pci_device_id e1000_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 
+#ifdef CONFIG_PM_OPS
+/*
+ * PM callback table: system sleep -> e1000_suspend/e1000_resume,
+ * runtime PM -> e1000_runtime_suspend/e1000_runtime_resume with
+ * e1000_idle as the runtime-idle check.
+ */
+static const struct dev_pm_ops e1000_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+       SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+                               e1000_runtime_resume, e1000_idle)
+};
+#endif
+
 /* PCI Device API Driver */
 static struct pci_driver e1000_driver = {
        .name     = e1000e_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
-       /* Power Management Hooks */
-       .suspend  = e1000_suspend,
-       .resume   = e1000_resume,
+#ifdef CONFIG_PM_OPS
+       .driver.pm = &e1000_pm_ops,
 #endif
        .shutdown = e1000_shutdown,
        .err_handler = &e1000_err_handler
@@ -5394,10 +5933,9 @@ static struct pci_driver e1000_driver = {
 static int __init e1000_init_module(void)
 {
        int ret;
-       printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
-              e1000e_driver_name, e1000e_driver_version);
-       printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
-              e1000e_driver_name);
+       pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+               e1000e_driver_version);
+       pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
        ret = pci_register_driver(&e1000_driver);
 
        return ret;