e1000e: update version from k4 to k6
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c8dc47f..b81c423 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
 #include <linux/if_vlan.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/pm_qos_params.h>
 
 #include "e1000.h"
 
-#define DRV_VERSION "0.2.0"
+#define DRV_VERSION "0.3.3.3-k6"
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -97,8 +98,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
 
        if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
                vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-                                        le16_to_cpu(vlan) &
-                                        E1000_RXD_SPC_VLAN_MASK);
+                                        le16_to_cpu(vlan));
        else
                netif_receive_skb(skb);
 
@@ -195,7 +195,7 @@ map_skb:
                buffer_info->dma = pci_map_single(pdev, skb->data,
                                                  adapter->rx_buffer_len,
                                                  PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        break;
@@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                                   ps_page->page,
                                                   0, PAGE_SIZE,
                                                   PCI_DMA_FROMDEVICE);
-                               if (pci_dma_mapping_error(ps_page->dma)) {
+                               if (pci_dma_mapping_error(pdev, ps_page->dma)) {
                                        dev_err(&adapter->pdev->dev,
                                          "RX DMA page map failed\n");
                                        adapter->rx_dma_failed++;
@@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                buffer_info->dma = pci_map_single(pdev, skb->data,
                                                  adapter->rx_ps_bsize0,
                                                  PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        /* cleanup skb */
@@ -341,6 +341,88 @@ no_buffers:
 }
 
 /**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+                                         int cleaned_count)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
+       struct e1000_rx_desc *rx_desc;
+       struct e1000_ring *rx_ring = adapter->rx_ring;
+       struct e1000_buffer *buffer_info;
+       struct sk_buff *skb;
+       unsigned int i;
+       unsigned int bufsz = 256 -
+                            16 /* for skb_reserve */ -
+                            NET_IP_ALIGN;
+
+       i = rx_ring->next_to_use;
+       buffer_info = &rx_ring->buffer_info[i];
+
+       while (cleaned_count--) {
+               skb = buffer_info->skb;
+               if (skb) {
+                       skb_trim(skb, 0);
+                       goto check_page;
+               }
+
+               skb = netdev_alloc_skb(netdev, bufsz);
+               if (unlikely(!skb)) {
+                       /* Better luck next round */
+                       adapter->alloc_rx_buff_failed++;
+                       break;
+               }
+
+               /* Make buffer alignment 2 beyond a 16 byte boundary
+                * this will result in a 16 byte aligned IP header after
+                * the 14 byte MAC header is removed
+                */
+               skb_reserve(skb, NET_IP_ALIGN);
+
+               buffer_info->skb = skb;
+check_page:
+               /* allocate a new page if necessary */
+               if (!buffer_info->page) {
+                       buffer_info->page = alloc_page(GFP_ATOMIC);
+                       if (unlikely(!buffer_info->page)) {
+                               adapter->alloc_rx_buff_failed++;
+                               break;
+                       }
+               }
+
+               if (!buffer_info->dma)
+                       buffer_info->dma = pci_map_page(pdev,
+                                                       buffer_info->page, 0,
+                                                       PAGE_SIZE,
+                                                       PCI_DMA_FROMDEVICE);
+
+               rx_desc = E1000_RX_DESC(*rx_ring, i);
+               rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+               if (unlikely(++i == rx_ring->count))
+                       i = 0;
+               buffer_info = &rx_ring->buffer_info[i];
+       }
+
+       if (likely(rx_ring->next_to_use != i)) {
+               rx_ring->next_to_use = i;
+               if (unlikely(i-- == 0))
+                       i = (rx_ring->count - 1);
+
+               /* Force memory writes to complete before letting h/w
+                * know there are new descriptors to fetch.  (Only
+                * applicable for weak-ordered memory model archs,
+                * such as IA-64). */
+               wmb();
+               writel(i, adapter->hw.hw_addr + rx_ring->tail);
+       }
+}
+
+/**
  * e1000_clean_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
  *
@@ -401,8 +484,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                 * packet, also make sure the frame isn't just CRC only */
                if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
                        /* All receives must fit into a single buffer */
-                       ndev_dbg(netdev, "%s: Receive packet consumed "
-                                "multiple buffers\n", netdev->name);
+                       e_dbg("%s: Receive packet consumed multiple buffers\n",
+                             netdev->name);
                        /* recycle */
                        buffer_info->skb = skb;
                        goto next_desc;
@@ -427,9 +510,12 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                            netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
                        if (new_skb) {
                                skb_reserve(new_skb, NET_IP_ALIGN);
-                               memcpy(new_skb->data - NET_IP_ALIGN,
-                                      skb->data - NET_IP_ALIGN,
-                                      length + NET_IP_ALIGN);
+                               skb_copy_to_linear_data_offset(new_skb,
+                                                              -NET_IP_ALIGN,
+                                                              (skb->data -
+                                                               NET_IP_ALIGN),
+                                                              (length +
+                                                               NET_IP_ALIGN));
                                /* save the skb in buffer_info as good */
                                buffer_info->skb = skb;
                                skb = new_skb;
@@ -466,10 +552,10 @@ next_desc:
        if (cleaned_count)
                adapter->alloc_rx_buf(adapter, cleaned_count);
 
-       adapter->total_rx_packets += total_rx_packets;
        adapter->total_rx_bytes += total_rx_bytes;
-       adapter->net_stats.rx_packets += total_rx_packets;
+       adapter->total_rx_packets += total_rx_packets;
        adapter->net_stats.rx_bytes += total_rx_bytes;
+       adapter->net_stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -493,28 +579,26 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
        unsigned int i = tx_ring->next_to_clean;
        unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
        struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
-       struct net_device *netdev = adapter->netdev;
 
        /* detected Tx unit hang */
-       ndev_err(netdev,
-                "Detected Tx Unit Hang:\n"
-                "  TDH                  <%x>\n"
-                "  TDT                  <%x>\n"
-                "  next_to_use          <%x>\n"
-                "  next_to_clean        <%x>\n"
-                "buffer_info[next_to_clean]:\n"
-                "  time_stamp           <%lx>\n"
-                "  next_to_watch        <%x>\n"
-                "  jiffies              <%lx>\n"
-                "  next_to_watch.status <%x>\n",
-                readl(adapter->hw.hw_addr + tx_ring->head),
-                readl(adapter->hw.hw_addr + tx_ring->tail),
-                tx_ring->next_to_use,
-                tx_ring->next_to_clean,
-                tx_ring->buffer_info[eop].time_stamp,
-                eop,
-                jiffies,
-                eop_desc->upper.fields.status);
+       e_err("Detected Tx Unit Hang:\n"
+             "  TDH                  <%x>\n"
+             "  TDT                  <%x>\n"
+             "  next_to_use          <%x>\n"
+             "  next_to_clean        <%x>\n"
+             "buffer_info[next_to_clean]:\n"
+             "  time_stamp           <%lx>\n"
+             "  next_to_watch        <%x>\n"
+             "  jiffies              <%lx>\n"
+             "  next_to_watch.status <%x>\n",
+             readl(adapter->hw.hw_addr + tx_ring->head),
+             readl(adapter->hw.hw_addr + tx_ring->tail),
+             tx_ring->next_to_use,
+             tx_ring->next_to_clean,
+             tx_ring->buffer_info[eop].time_stamp,
+             eop,
+             jiffies,
+             eop_desc->upper.fields.status);
 }
 
 /**
@@ -606,8 +690,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
-       adapter->net_stats.tx_packets += total_tx_packets;
        adapter->net_stats.tx_bytes += total_tx_bytes;
+       adapter->net_stats.tx_packets += total_tx_packets;
        return cleaned;
 }
 
@@ -664,8 +748,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                buffer_info->dma = 0;
 
                if (!(staterr & E1000_RXD_STAT_EOP)) {
-                       ndev_dbg(netdev, "%s: Packet Split buffers didn't pick "
-                                "up the full packet\n", netdev->name);
+                       e_dbg("%s: Packet Split buffers didn't pick up the "
+                             "full packet\n", netdev->name);
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
@@ -678,8 +762,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                length = le16_to_cpu(rx_desc->wb.middle.length0);
 
                if (!length) {
-                       ndev_dbg(netdev, "%s: Last part of the packet spanning"
-                                " multiple descriptors\n", netdev->name);
+                       e_dbg("%s: Last part of the packet spanning multiple "
+                             "descriptors\n", netdev->name);
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
@@ -775,10 +859,190 @@ next_desc:
        if (cleaned_count)
                adapter->alloc_rx_buf(adapter, cleaned_count);
 
-       adapter->total_rx_packets += total_rx_packets;
        adapter->total_rx_bytes += total_rx_bytes;
+       adapter->total_rx_packets += total_rx_packets;
+       adapter->net_stats.rx_bytes += total_rx_bytes;
        adapter->net_stats.rx_packets += total_rx_packets;
+       return cleaned;
+}
+
+/**
+ * e1000_consume_page - helper function
+ **/
+static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+                               u16 length)
+{
+       bi->page = NULL;
+       skb->len += length;
+       skb->data_len += length;
+       skb->truesize += length;
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done, there
+ * is no guarantee that everything was cleaned
+ **/
+
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+                                     int *work_done, int work_to_do)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
+       struct e1000_ring *rx_ring = adapter->rx_ring;
+       struct e1000_rx_desc *rx_desc, *next_rxd;
+       struct e1000_buffer *buffer_info, *next_buffer;
+       u32 length;
+       unsigned int i;
+       int cleaned_count = 0;
+       bool cleaned = false;
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+       i = rx_ring->next_to_clean;
+       rx_desc = E1000_RX_DESC(*rx_ring, i);
+       buffer_info = &rx_ring->buffer_info[i];
+
+       while (rx_desc->status & E1000_RXD_STAT_DD) {
+               struct sk_buff *skb;
+               u8 status;
+
+               if (*work_done >= work_to_do)
+                       break;
+               (*work_done)++;
+
+               status = rx_desc->status;
+               skb = buffer_info->skb;
+               buffer_info->skb = NULL;
+
+               ++i;
+               if (i == rx_ring->count)
+                       i = 0;
+               next_rxd = E1000_RX_DESC(*rx_ring, i);
+               prefetch(next_rxd);
+
+               next_buffer = &rx_ring->buffer_info[i];
+
+               cleaned = true;
+               cleaned_count++;
+               pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
+                              PCI_DMA_FROMDEVICE);
+               buffer_info->dma = 0;
+
+               length = le16_to_cpu(rx_desc->length);
+
+               /* errors is only valid for DD + EOP descriptors */
+               if (unlikely((status & E1000_RXD_STAT_EOP) &&
+                   (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
+                               /* recycle both page and skb */
+                               buffer_info->skb = skb;
+                               /* an error means any chain goes out the window
+                                * too */
+                               if (rx_ring->rx_skb_top)
+                                       dev_kfree_skb(rx_ring->rx_skb_top);
+                               rx_ring->rx_skb_top = NULL;
+                               goto next_desc;
+               }
+
+#define rxtop rx_ring->rx_skb_top
+               if (!(status & E1000_RXD_STAT_EOP)) {
+                       /* this descriptor is only the beginning (or middle) */
+                       if (!rxtop) {
+                               /* this is the beginning of a chain */
+                               rxtop = skb;
+                               skb_fill_page_desc(rxtop, 0, buffer_info->page,
+                                                  0, length);
+                       } else {
+                               /* this is the middle of a chain */
+                               skb_fill_page_desc(rxtop,
+                                   skb_shinfo(rxtop)->nr_frags,
+                                   buffer_info->page, 0, length);
+                               /* re-use the skb, only consumed the page */
+                               buffer_info->skb = skb;
+                       }
+                       e1000_consume_page(buffer_info, rxtop, length);
+                       goto next_desc;
+               } else {
+                       if (rxtop) {
+                               /* end of the chain */
+                               skb_fill_page_desc(rxtop,
+                                   skb_shinfo(rxtop)->nr_frags,
+                                   buffer_info->page, 0, length);
+                               /* re-use the current skb, we only consumed the
+                                * page */
+                               buffer_info->skb = skb;
+                               skb = rxtop;
+                               rxtop = NULL;
+                               e1000_consume_page(buffer_info, skb, length);
+                       } else {
+                               /* no chain, got EOP, this buf is the packet;
+                                * copybreak to save the put_page/alloc_page */
+                               if (length <= copybreak &&
+                                   skb_tailroom(skb) >= length) {
+                                       u8 *vaddr;
+                                       vaddr = kmap_atomic(buffer_info->page,
+                                                          KM_SKB_DATA_SOFTIRQ);
+                                       memcpy(skb_tail_pointer(skb), vaddr,
+                                              length);
+                                       kunmap_atomic(vaddr,
+                                                     KM_SKB_DATA_SOFTIRQ);
+                                       /* re-use the page, so don't erase
+                                        * buffer_info->page */
+                                       skb_put(skb, length);
+                               } else {
+                                       skb_fill_page_desc(skb, 0,
+                                                          buffer_info->page, 0,
+                                                          length);
+                                       e1000_consume_page(buffer_info, skb,
+                                                          length);
+                               }
+                       }
+               }
+
+               /* Receive Checksum Offload XXX recompute due to CRC strip? */
+               e1000_rx_checksum(adapter,
+                                 (u32)(status) |
+                                 ((u32)(rx_desc->errors) << 24),
+                                 le16_to_cpu(rx_desc->csum), skb);
+
+               /* probably a little skewed due to removing CRC */
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
+
+               /* eth type trans needs skb->data to point to something */
+               if (!pskb_may_pull(skb, ETH_HLEN)) {
+                       e_err("pskb_may_pull failed.\n");
+                       dev_kfree_skb(skb);
+                       goto next_desc;
+               }
+
+               e1000_receive_skb(adapter, netdev, skb, status,
+                                 rx_desc->special);
+
+next_desc:
+               rx_desc->status = 0;
+
+               /* return some buffers to hardware, one at a time is too slow */
+               if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+                       adapter->alloc_rx_buf(adapter, cleaned_count);
+                       cleaned_count = 0;
+               }
+
+               /* use prefetched values */
+               rx_desc = next_rxd;
+               buffer_info = next_buffer;
+       }
+       rx_ring->next_to_clean = i;
+
+       cleaned_count = e1000_desc_unused(rx_ring);
+       if (cleaned_count)
+               adapter->alloc_rx_buf(adapter, cleaned_count);
+
+       adapter->total_rx_bytes += total_rx_bytes;
+       adapter->total_rx_packets += total_rx_packets;
        adapter->net_stats.rx_bytes += total_rx_bytes;
+       adapter->net_stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -802,6 +1066,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
                                pci_unmap_single(pdev, buffer_info->dma,
                                                 adapter->rx_buffer_len,
                                                 PCI_DMA_FROMDEVICE);
+                       else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
+                               pci_unmap_page(pdev, buffer_info->dma,
+                                              PAGE_SIZE,
+                                              PCI_DMA_FROMDEVICE);
                        else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
                                pci_unmap_single(pdev, buffer_info->dma,
                                                 adapter->rx_ps_bsize0,
@@ -809,6 +1077,11 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
                        buffer_info->dma = 0;
                }
 
+               if (buffer_info->page) {
+                       put_page(buffer_info->page);
+                       buffer_info->page = NULL;
+               }
+
                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
@@ -842,6 +1115,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
        writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
+static void e1000e_downshift_workaround(struct work_struct *work)
+{
+       struct e1000_adapter *adapter = container_of(work,
+                                       struct e1000_adapter, downshift_task);
+
+       e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
+}
+
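The interrupt-handler hunks further down stop calling the ICH8 gig downshift workaround directly and schedule this work item instead, so the workaround no longer runs in hard-irq context. A minimal sketch of the deferral pattern, assuming the driver's own declarations and that downshift_task is initialized once during driver setup (not visible in this excerpt):

    /* Illustrative sketch, not part of the patch. Assumes struct e1000_adapter
     * and e1000e_downshift_workaround() from the driver's own headers. */
    #include <linux/workqueue.h>

    static void example_init_downshift_task(struct e1000_adapter *adapter)
    {
            /* bind the work item to its handler; container_of() in the
             * handler then recovers the adapter from the work pointer */
            INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
    }

    static void example_from_irq(struct e1000_adapter *adapter)
    {
            /* hard-irq context: only queue the work; the ich8lan
             * workaround itself runs later in process context */
            schedule_work(&adapter->downshift_task);
    }
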
 /**
  * e1000_intr_msi - Interrupt Handler
  * @irq: interrupt number
@@ -866,7 +1147,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
                 */
                if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
                    (!(er32(STATUS) & E1000_STATUS_LU)))
-                       e1000e_gig_downshift_workaround_ich8lan(hw);
+                       schedule_work(&adapter->downshift_task);
 
                /*
                 * 80003ES2LAN workaround-- For packet buffer work-around on
@@ -932,7 +1213,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
                 */
                if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
                    (!(er32(STATUS) & E1000_STATUS_LU)))
-                       e1000e_gig_downshift_workaround_ich8lan(hw);
+                       schedule_work(&adapter->downshift_task);
 
                /*
                 * 80003ES2LAN workaround--
@@ -963,28 +1244,36 @@ static irqreturn_t e1000_intr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+/**
+ * e1000_request_irq - initialize interrupts
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
 static int e1000_request_irq(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       irq_handler_t handler = e1000_intr;
        int irq_flags = IRQF_SHARED;
        int err;
 
-       if (!pci_enable_msi(adapter->pdev)) {
-               adapter->flags |= FLAG_MSI_ENABLED;
-               handler = e1000_intr_msi;
-               irq_flags = 0;
+       if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) {
+               err = pci_enable_msi(adapter->pdev);
+               if (!err) {
+                       adapter->flags |= FLAG_MSI_ENABLED;
+                       irq_flags = 0;
+               }
        }
 
-       err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
-                         netdev);
+       err = request_irq(adapter->pdev->irq,
+                         ((adapter->flags & FLAG_MSI_ENABLED) ?
+                               &e1000_intr_msi : &e1000_intr),
+                         irq_flags, netdev->name, netdev);
        if (err) {
-               ndev_err(netdev,
-                      "Unable to allocate %s interrupt (return: %d)\n",
-                       adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx",
-                       err);
-               if (adapter->flags & FLAG_MSI_ENABLED)
+               if (adapter->flags & FLAG_MSI_ENABLED) {
                        pci_disable_msi(adapter->pdev);
+                       adapter->flags &= ~FLAG_MSI_ENABLED;
+               }
+               e_err("Unable to allocate interrupt, Error: %d\n", err);
        }
 
        return err;
@@ -1123,8 +1412,7 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
        return 0;
 err:
        vfree(tx_ring->buffer_info);
-       ndev_err(adapter->netdev,
-       "Unable to allocate memory for the transmit descriptor ring\n");
+       e_err("Unable to allocate memory for the transmit descriptor ring\n");
        return err;
 }
 
@@ -1178,8 +1466,7 @@ err_pages:
        }
 err:
        vfree(rx_ring->buffer_info);
-       ndev_err(adapter->netdev,
-       "Unable to allocate memory for the transmit descriptor ring\n");
+       e_err("Unable to allocate memory for the transmit descriptor ring\n");
        return err;
 }
 
@@ -1520,7 +1807,6 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
                if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
                        /* enable VLAN receive filtering */
                        rctl = er32(RCTL);
-                       rctl |= E1000_RCTL_VFE;
                        rctl &= ~E1000_RCTL_CFIEN;
                        ew32(RCTL, rctl);
                        e1000_update_mng_vlan(adapter);
@@ -1532,10 +1818,6 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
                ew32(CTRL, ctrl);
 
                if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
-                       /* disable VLAN filtering */
-                       rctl = er32(RCTL);
-                       rctl &= ~E1000_RCTL_VFE;
-                       ew32(RCTL, rctl);
                        if (adapter->mng_vlan_id !=
                            (u16)E1000_MNG_VLAN_NONE) {
                                e1000_vlan_rx_kill_vid(netdev,
@@ -1755,10 +2037,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
         * a lot of memory, since we allocate 3 pages at all times
         * per packet.
         */
-       adapter->rx_ps_pages = 0;
        pages = PAGE_USE_COUNT(adapter->netdev->mtu);
-       if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
+       if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
+           (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
                adapter->rx_ps_pages = pages;
+       else
+               adapter->rx_ps_pages = 0;
 
        if (adapter->rx_ps_pages) {
                /* Configure extra packet-split registers */
@@ -1819,9 +2103,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
                        sizeof(union e1000_rx_desc_packet_split);
                adapter->clean_rx = e1000_clean_rx_irq_ps;
                adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
+       } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
+               rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
+               adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+               adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
        } else {
-               rdlen = rx_ring->count *
-                       sizeof(struct e1000_rx_desc);
+               rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
                adapter->clean_rx = e1000_clean_rx_irq;
                adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
        }
@@ -1885,8 +2172,21 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
         * units), e.g. using jumbo frames when setting to E1000_ERT_2048
         */
        if ((adapter->flags & FLAG_HAS_ERT) &&
-           (adapter->netdev->mtu > ETH_DATA_LEN))
-               ew32(ERT, E1000_ERT_2048);
+           (adapter->netdev->mtu > ETH_DATA_LEN)) {
+               u32 rxdctl = er32(RXDCTL(0));
+               ew32(RXDCTL(0), rxdctl | 0x3);
+               ew32(ERT, E1000_ERT_2048 | (1 << 13));
+               /*
+                * With jumbo frames and early-receive enabled, excessive
+                * C4->C2 latencies result in dropped transactions.
+                */
+               pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+                                         e1000e_driver_name, 55);
+       } else {
+               pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+                                         e1000e_driver_name,
+                                         PM_QOS_DEFAULT_VALUE);
+       }
 
        /* Enable Receives */
        ew32(RCTL, rctl);
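
The pm_qos_update_requirement() calls in the hunk above (together with the new pm_qos_params.h include at the top of the file) use the old pm_qos_params interface, which keys each requirement by class and name. A sketch of the full lifecycle, under the assumption that the requirement is added at driver init and removed at teardown (neither is visible in this excerpt):

    /* Illustrative sketch only -- not part of the patch. Assumes the driver's
     * e1000e_driver_name declaration. CPU DMA latency values are microseconds. */
    #include <linux/pm_qos_params.h>

    static void example_pm_qos_lifecycle(void)
    {
            /* driver init: register a (class, name) requirement, unconstrained */
            pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
                                   PM_QOS_DEFAULT_VALUE);

            /* jumbo frames + early receive active: cap CPU DMA latency at 55 us
             * so excessive C4->C2 exit latencies don't drop transactions */
            pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
                                      e1000e_driver_name, 55);

            /* constraint no longer needed */
            pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
                                      e1000e_driver_name, PM_QOS_DEFAULT_VALUE);

            /* driver teardown */
            pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
    }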
@@ -1940,11 +2240,16 @@ static void e1000_set_multi(struct net_device *netdev)
 
        if (netdev->flags & IFF_PROMISC) {
                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-       } else if (netdev->flags & IFF_ALLMULTI) {
-               rctl |= E1000_RCTL_MPE;
-               rctl &= ~E1000_RCTL_UPE;
+               rctl &= ~E1000_RCTL_VFE;
        } else {
-               rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+               if (netdev->flags & IFF_ALLMULTI) {
+                       rctl |= E1000_RCTL_MPE;
+                       rctl &= ~E1000_RCTL_UPE;
+               } else {
+                       rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+               }
+               if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
+                       rctl |= E1000_RCTL_VFE;
        }
 
        ew32(RCTL, rctl);
@@ -2155,10 +2460,18 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
        /* Allow time for pending master requests to run */
        mac->ops.reset_hw(hw);
+
+       /*
+        * For parts with AMT enabled, let the firmware know
+        * that the network interface is in control
+        */
+       if (adapter->flags & FLAG_HAS_AMT)
+               e1000_get_hw_control(adapter);
+
        ew32(WUC, 0);
 
        if (mac->ops.init_hw(hw))
-               ndev_err(adapter->netdev, "Hardware Error\n");
+               e_err("Hardware Error\n");
 
        e1000_update_mng_vlan(adapter);
 
@@ -2215,7 +2528,7 @@ void e1000e_down(struct e1000_adapter *adapter)
        ew32(RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */
 
-       netif_stop_queue(netdev);
+       netif_tx_stop_all_queues(netdev);
 
        /* disable transmits in the hardware */
        tctl = er32(TCTL);
@@ -2236,7 +2549,8 @@ void e1000e_down(struct e1000_adapter *adapter)
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
 
-       e1000e_reset(adapter);
+       if (!pci_channel_offline(adapter->pdev))
+               e1000e_reset(adapter);
        e1000_clean_tx_ring(adapter);
        e1000_clean_rx_ring(adapter);
 
@@ -2286,19 +2600,146 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
        /* Explicitly disable IRQ since the NIC can be in any state. */
        e1000_irq_disable(adapter);
 
-       spin_lock_init(&adapter->stats_lock);
-
        set_bit(__E1000_DOWN, &adapter->state);
        return 0;
 
 err:
-       ndev_err(netdev, "Unable to allocate memory for queues\n");
+       e_err("Unable to allocate memory for queues\n");
        kfree(adapter->rx_ring);
        kfree(adapter->tx_ring);
        return -ENOMEM;
 }
 
 /**
+ * e1000_intr_msi_test - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+{
+       struct net_device *netdev = data;
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       u32 icr = er32(ICR);
+
+       e_dbg("%s: icr is %08X\n", netdev->name, icr);
+       if (icr & E1000_ICR_RXSEQ) {
+               adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+               wmb();
+       }
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * e1000_test_msi_interrupt - Returns 0 for successful test
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c
+ **/
+static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct e1000_hw *hw = &adapter->hw;
+       int err;
+
+       /* poll_enable hasn't been called yet, so don't need disable */
+       /* clear any pending events */
+       er32(ICR);
+
+       /* free the real vector and request a test handler */
+       e1000_free_irq(adapter);
+
+       /* Assume that the test fails; if it succeeds, the test
+        * MSI irq handler will unset this flag */
+       adapter->flags |= FLAG_MSI_TEST_FAILED;
+
+       err = pci_enable_msi(adapter->pdev);
+       if (err)
+               goto msi_test_failed;
+
+       err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
+                         netdev->name, netdev);
+       if (err) {
+               pci_disable_msi(adapter->pdev);
+               goto msi_test_failed;
+       }
+
+       wmb();
+
+       e1000_irq_enable(adapter);
+
+       /* fire an unusual interrupt on the test handler */
+       ew32(ICS, E1000_ICS_RXSEQ);
+       e1e_flush();
+       msleep(50);
+
+       e1000_irq_disable(adapter);
+
+       rmb();
+
+       if (adapter->flags & FLAG_MSI_TEST_FAILED) {
+               err = -EIO;
+               e_info("MSI interrupt test failed!\n");
+       }
+
+       free_irq(adapter->pdev->irq, netdev);
+       pci_disable_msi(adapter->pdev);
+
+       if (err == -EIO)
+               goto msi_test_failed;
+
+       /* okay so the test worked, restore settings */
+       e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
+msi_test_failed:
+       /* restore the original vector, even if it failed */
+       e1000_request_irq(adapter);
+       return err;
+}
+
+/**
+ * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c, called with e1000 interrupts disabled.
+ **/
+static int e1000_test_msi(struct e1000_adapter *adapter)
+{
+       int err;
+       u16 pci_cmd;
+
+       if (!(adapter->flags & FLAG_MSI_ENABLED))
+               return 0;
+
+       /* disable SERR in case the MSI write causes a master abort */
+       pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+       pci_write_config_word(adapter->pdev, PCI_COMMAND,
+                             pci_cmd & ~PCI_COMMAND_SERR);
+
+       err = e1000_test_msi_interrupt(adapter);
+
+       /* restore previous setting of command word */
+       pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+
+       /* success ! */
+       if (!err)
+               return 0;
+
+       /* EIO means MSI test failed */
+       if (err != -EIO)
+               return err;
+
+       /* back to INTx mode */
+       e_warn("MSI interrupt test failed, using legacy interrupt.\n");
+
+       e1000_free_irq(adapter);
+
+       err = e1000_request_irq(adapter);
+
+       return err;
+}
+
+/**
  * e1000_open - Called when a network interface is made active
  * @netdev: network interface device structure
  *
@@ -2341,8 +2782,7 @@ static int e1000_open(struct net_device *netdev)
         * If AMT is enabled, let the firmware know that the network
         * interface is now open
         */
-       if ((adapter->flags & FLAG_HAS_AMT) &&
-           e1000e_check_mng_mode(&adapter->hw))
+       if (adapter->flags & FLAG_HAS_AMT)
                e1000_get_hw_control(adapter);
 
        /*
@@ -2357,6 +2797,19 @@ static int e1000_open(struct net_device *netdev)
        if (err)
                goto err_req_irq;
 
+       /*
+        * Work around PCIe errata with MSI interrupts causing some chipsets to
+        * ignore e1000e MSI messages, which means we need to test our MSI
+        * interrupt now
+        */
+       {
+               err = e1000_test_msi(adapter);
+               if (err) {
+                       e_err("Interrupt allocation failed\n");
+                       goto err_req_irq;
+               }
+       }
+
        /* From here on the code is the same as e1000e_up() */
        clear_bit(__E1000_DOWN, &adapter->state);
 
@@ -2364,6 +2817,8 @@ static int e1000_open(struct net_device *netdev)
 
        e1000_irq_enable(adapter);
 
+       netif_tx_start_all_queues(netdev);
+
        /* fire a link status change interrupt to start the watchdog */
        ew32(ICS, E1000_ICS_LSC);
 
@@ -2418,8 +2873,7 @@ static int e1000_close(struct net_device *netdev)
         * If AMT is enabled, let the firmware know that the network
         * interface is now closed
         */
-       if ((adapter->flags & FLAG_HAS_AMT) &&
-           e1000e_check_mng_mode(&adapter->hw))
+       if (adapter->flags & FLAG_HAS_AMT)
                e1000_release_hw_control(adapter);
 
        return 0;
@@ -2464,6 +2918,21 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
        return 0;
 }
 
+/**
+ * e1000e_update_phy_task - work thread to update phy
+ * @work: pointer to our work struct
+ *
+ * this worker thread exists because we must acquire a
+ * semaphore to read the phy, which we could msleep while
+ * waiting for it, and we can't msleep in a timer.
+ **/
+static void e1000e_update_phy_task(struct work_struct *work)
+{
+       struct e1000_adapter *adapter = container_of(work,
+                                       struct e1000_adapter, update_phy_task);
+       e1000_get_phy_info(&adapter->hw);
+}
+
 /*
  * Need to wait a few seconds after link up to get diagnostic information from
  * the phy
@@ -2471,7 +2940,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 static void e1000_update_phy_info(unsigned long data)
 {
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
-       e1000_get_phy_info(&adapter->hw);
+       schedule_work(&adapter->update_phy_task);
 }
 
 /**
@@ -2482,10 +2951,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       unsigned long irq_flags;
-       u16 phy_tmp;
-
-#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
        /*
         * Prevent stats update while adapter is being reset, or if the pci
@@ -2496,66 +2961,29 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
        if (pci_channel_offline(pdev))
                return;
 
-       spin_lock_irqsave(&adapter->stats_lock, irq_flags);
-
-       /*
-        * these counters are modified from e1000_adjust_tbi_stats,
-        * called from the interrupt context, so they must only
-        * be written while holding adapter->stats_lock
-        */
-
        adapter->stats.crcerrs += er32(CRCERRS);
        adapter->stats.gprc += er32(GPRC);
-       adapter->stats.gorcl += er32(GORCL);
-       adapter->stats.gorch += er32(GORCH);
+       adapter->stats.gorc += er32(GORCL);
+       er32(GORCH); /* Clear gorc */
        adapter->stats.bprc += er32(BPRC);
        adapter->stats.mprc += er32(MPRC);
        adapter->stats.roc += er32(ROC);
 
-       if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
-               adapter->stats.prc64 += er32(PRC64);
-               adapter->stats.prc127 += er32(PRC127);
-               adapter->stats.prc255 += er32(PRC255);
-               adapter->stats.prc511 += er32(PRC511);
-               adapter->stats.prc1023 += er32(PRC1023);
-               adapter->stats.prc1522 += er32(PRC1522);
-               adapter->stats.symerrs += er32(SYMERRS);
-               adapter->stats.sec += er32(SEC);
-       }
-
        adapter->stats.mpc += er32(MPC);
        adapter->stats.scc += er32(SCC);
        adapter->stats.ecol += er32(ECOL);
        adapter->stats.mcc += er32(MCC);
        adapter->stats.latecol += er32(LATECOL);
        adapter->stats.dc += er32(DC);
-       adapter->stats.rlec += er32(RLEC);
        adapter->stats.xonrxc += er32(XONRXC);
        adapter->stats.xontxc += er32(XONTXC);
        adapter->stats.xoffrxc += er32(XOFFRXC);
        adapter->stats.xofftxc += er32(XOFFTXC);
-       adapter->stats.fcruc += er32(FCRUC);
        adapter->stats.gptc += er32(GPTC);
-       adapter->stats.gotcl += er32(GOTCL);
-       adapter->stats.gotch += er32(GOTCH);
+       adapter->stats.gotc += er32(GOTCL);
+       er32(GOTCH); /* Clear gotc */
        adapter->stats.rnbc += er32(RNBC);
        adapter->stats.ruc += er32(RUC);
-       adapter->stats.rfc += er32(RFC);
-       adapter->stats.rjc += er32(RJC);
-       adapter->stats.torl += er32(TORL);
-       adapter->stats.torh += er32(TORH);
-       adapter->stats.totl += er32(TOTL);
-       adapter->stats.toth += er32(TOTH);
-       adapter->stats.tpr += er32(TPR);
-
-       if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
-               adapter->stats.ptc64 += er32(PTC64);
-               adapter->stats.ptc127 += er32(PTC127);
-               adapter->stats.ptc255 += er32(PTC255);
-               adapter->stats.ptc511 += er32(PTC511);
-               adapter->stats.ptc1023 += er32(PTC1023);
-               adapter->stats.ptc1522 += er32(PTC1522);
-       }
 
        adapter->stats.mptc += er32(MPTC);
        adapter->stats.bptc += er32(BPTC);
@@ -2574,19 +3002,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
        adapter->stats.tsctc += er32(TSCTC);
        adapter->stats.tsctfc += er32(TSCTFC);
 
-       adapter->stats.iac += er32(IAC);
-
-       if (adapter->flags & FLAG_HAS_STATS_ICR_ICT) {
-               adapter->stats.icrxoc += er32(ICRXOC);
-               adapter->stats.icrxptc += er32(ICRXPTC);
-               adapter->stats.icrxatc += er32(ICRXATC);
-               adapter->stats.ictxptc += er32(ICTXPTC);
-               adapter->stats.ictxatc += er32(ICTXATC);
-               adapter->stats.ictxqec += er32(ICTXQEC);
-               adapter->stats.ictxqmtc += er32(ICTXQMTC);
-               adapter->stats.icrxdmtc += er32(ICRXDMTC);
-       }
-
        /* Fill out the OS statistics structure */
        adapter->net_stats.multicast = adapter->stats.mprc;
        adapter->net_stats.collisions = adapter->stats.colc;
@@ -2616,38 +3031,66 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 
        /* Tx Dropped needs to be maintained elsewhere */
 
-       /* Phy Stats */
-       if (hw->phy.media_type == e1000_media_type_copper) {
-               if ((adapter->link_speed == SPEED_1000) &&
-                  (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
-                       phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
-                       adapter->phy_stats.idle_errors += phy_tmp;
-               }
-       }
-
        /* Management Stats */
        adapter->stats.mgptc += er32(MGTPTC);
        adapter->stats.mgprc += er32(MGTPRC);
        adapter->stats.mgpdc += er32(MGTPDC);
+}
 
-       spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
+/**
+ * e1000_phy_read_status - Update the PHY register status snapshot
+ * @adapter: board private structure
+ **/
+static void e1000_phy_read_status(struct e1000_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct e1000_phy_regs *phy = &adapter->phy_regs;
+       int ret_val;
+
+       if ((er32(STATUS) & E1000_STATUS_LU) &&
+           (adapter->hw.phy.media_type == e1000_media_type_copper)) {
+               ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
+               ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
+               ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
+               ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
+               ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
+               ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
+               ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
+               ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
+               if (ret_val)
+                       e_warn("Error reading PHY register\n");
+       } else {
+               /*
+                * Do not read PHY registers if link is not up
+                * Set values to typical power-on defaults
+                */
+               phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
+               phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
+                            BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
+                            BMSR_ERCAP);
+               phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
+                                 ADVERTISE_ALL | ADVERTISE_CSMA);
+               phy->lpa = 0;
+               phy->expansion = EXPANSION_ENABLENPAGE;
+               phy->ctrl1000 = ADVERTISE_1000FULL;
+               phy->stat1000 = 0;
+               phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
+       }
 }
 
 static void e1000_print_link_info(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       struct net_device *netdev = adapter->netdev;
        u32 ctrl = er32(CTRL);
 
-       ndev_info(netdev,
-               "Link is Up %d Mbps %s, Flow Control: %s\n",
-               adapter->link_speed,
-               (adapter->link_duplex == FULL_DUPLEX) ?
-                               "Full Duplex" : "Half Duplex",
-               ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
-                               "RX/TX" :
-               ((ctrl & E1000_CTRL_RFCE) ? "RX" :
-               ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
+       e_info("Link is Up %d Mbps %s, Flow Control: %s\n",
+              adapter->link_speed,
+              (adapter->link_duplex == FULL_DUPLEX) ?
+                               "Full Duplex" : "Half Duplex",
+              ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
+                               "RX/TX" :
+              ((ctrl & E1000_CTRL_RFCE) ? "RX" :
+              ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
 }
 
 static bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2687,8 +3130,7 @@ static bool e1000_has_link(struct e1000_adapter *adapter)
        if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
            (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
                /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
-               ndev_info(adapter->netdev,
-                         "Gigabit has been disabled, downgrading speed\n");
+               e_info("Gigabit has been disabled, downgrading speed\n");
        }
 
        return link_active;
@@ -2745,6 +3187,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                if (!netif_carrier_ok(netdev)) {
                        bool txb2b = 1;
                        /* update snapshot of PHY registers on LSC */
+                       e1000_phy_read_status(adapter);
                        mac->ops.get_link_up_info(&adapter->hw,
                                                   &adapter->link_speed,
                                                   &adapter->link_duplex);
@@ -2759,7 +3202,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                        case SPEED_10:
                                txb2b = 0;
                                netdev->tx_queue_len = 10;
-                               adapter->tx_timeout_factor = 14;
+                               adapter->tx_timeout_factor = 16;
                                break;
                        case SPEED_100:
                                txb2b = 0;
@@ -2788,8 +3231,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                                switch (adapter->link_speed) {
                                case SPEED_10:
                                case SPEED_100:
-                                       ndev_info(netdev,
-                                       "10/100 speed: disabling TSO\n");
+                                       e_info("10/100 speed: disabling TSO\n");
                                        netdev->features &= ~NETIF_F_TSO;
                                        netdev->features &= ~NETIF_F_TSO6;
                                        break;
@@ -2812,7 +3254,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                        ew32(TCTL, tctl);
 
                        netif_carrier_on(netdev);
-                       netif_wake_queue(netdev);
+                       netif_tx_wake_all_queues(netdev);
 
                        if (!test_bit(__E1000_DOWN, &adapter->state))
                                mod_timer(&adapter->phy_info_timer,
@@ -2822,9 +3264,9 @@ static void e1000_watchdog_task(struct work_struct *work)
                if (netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
-                       ndev_info(netdev, "Link is Down\n");
+                       e_info("Link is Down\n");
                        netif_carrier_off(netdev);
-                       netif_stop_queue(netdev);
+                       netif_tx_stop_all_queues(netdev);
                        if (!test_bit(__E1000_DOWN, &adapter->state))
                                mod_timer(&adapter->phy_info_timer,
                                          round_jiffies(jiffies + 2 * HZ));
@@ -2842,10 +3284,10 @@ link_up:
        mac->collision_delta = adapter->stats.colc - adapter->colc_old;
        adapter->colc_old = adapter->stats.colc;
 
-       adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
-       adapter->gorcl_old = adapter->stats.gorcl;
-       adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
-       adapter->gotcl_old = adapter->stats.gotcl;
+       adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
+       adapter->gorc_old = adapter->stats.gorc;
+       adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
+       adapter->gotc_old = adapter->stats.gotc;
 
        e1000e_update_adaptive(&adapter->hw);
 
@@ -3036,7 +3478,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                                skb->data + offset,
                                size,
                                PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
                        dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
                        adapter->tx_dma_failed++;
                        return -1;
@@ -3074,7 +3516,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                                        offset,
                                        size,
                                        PCI_DMA_TODEVICE);
-                       if (pci_dma_mapping_error(buffer_info->dma)) {
+                       if (pci_dma_mapping_error(adapter->pdev,
+                                                 buffer_info->dma)) {
                                dev_err(&adapter->pdev->dev,
                                        "TX DMA page map failed\n");
                                adapter->tx_dma_failed++;
@@ -3295,8 +3738,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
                        pull_size = min((unsigned int)4, skb->data_len);
                        if (!__pskb_pull_tail(skb, pull_size)) {
-                               ndev_err(netdev,
-                                        "__pskb_pull_tail failed.\n");
+                               e_err("__pskb_pull_tail failed.\n");
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
@@ -3426,27 +3868,27 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-       if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+       if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-               ndev_err(netdev, "Invalid MTU setting\n");
+               e_err("Invalid MTU setting\n");
                return -EINVAL;
        }
 
        /* Jumbo frame size limits */
        if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
                if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-                       ndev_err(netdev, "Jumbo Frames not supported.\n");
+                       e_err("Jumbo Frames not supported.\n");
                        return -EINVAL;
                }
                if (adapter->hw.phy.type == e1000_phy_ife) {
-                       ndev_err(netdev, "Jumbo Frames not supported.\n");
+                       e_err("Jumbo Frames not supported.\n");
                        return -EINVAL;
                }
        }
 
 #define MAX_STD_JUMBO_FRAME_SIZE 9234
        if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-               ndev_err(netdev, "MTU > 9216 not supported.\n");
+               e_err("MTU > 9216 not supported.\n");
                return -EINVAL;
        }
 
@@ -3462,6 +3904,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
         * means we reserve 2 more, this pushes us to allocate from the next
         * larger slab size.
         * i.e. RXBUFFER_2048 --> size-4096 slab
+        * However with the new *_jumbo_rx* routines, jumbo receives will use
+        * fragmented skbs
         */
 
        if (max_frame <= 256)
@@ -3481,8 +3925,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
                                         + ETH_FCS_LEN;
 
-       ndev_info(netdev, "changing MTU from %d to %d\n",
-               netdev->mtu, new_mtu);
+       e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
 
        if (netif_running(netdev))
@@ -3500,7 +3943,6 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct mii_ioctl_data *data = if_mii(ifr);
-       unsigned long irq_flags;
 
        if (adapter->hw.phy.media_type != e1000_media_type_copper)
                return -EOPNOTSUPP;
@@ -3512,13 +3954,40 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
        case SIOCGMIIREG:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
-               spin_lock_irqsave(&adapter->stats_lock, irq_flags);
-               if (e1e_rphy(&adapter->hw, data->reg_num & 0x1F,
-                                  &data->val_out)) {
-                       spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
+               switch (data->reg_num & 0x1F) {
+               case MII_BMCR:
+                       data->val_out = adapter->phy_regs.bmcr;
+                       break;
+               case MII_BMSR:
+                       data->val_out = adapter->phy_regs.bmsr;
+                       break;
+               case MII_PHYSID1:
+                       data->val_out = (adapter->hw.phy.id >> 16);
+                       break;
+               case MII_PHYSID2:
+                       data->val_out = (adapter->hw.phy.id & 0xFFFF);
+                       break;
+               case MII_ADVERTISE:
+                       data->val_out = adapter->phy_regs.advertise;
+                       break;
+               case MII_LPA:
+                       data->val_out = adapter->phy_regs.lpa;
+                       break;
+               case MII_EXPANSION:
+                       data->val_out = adapter->phy_regs.expansion;
+                       break;
+               case MII_CTRL1000:
+                       data->val_out = adapter->phy_regs.ctrl1000;
+                       break;
+               case MII_STAT1000:
+                       data->val_out = adapter->phy_regs.stat1000;
+                       break;
+               case MII_ESTATUS:
+                       data->val_out = adapter->phy_regs.estatus;
+                       break;
+               default:
                        return -EIO;
                }
-               spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
                break;
        case SIOCSMIIREG:
        default:
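
The SIOCGMIIREG hunk above now answers register reads from the cached phy_regs snapshot rather than touching the PHY under a lock. For reference, a rough user-space sketch of issuing that ioctl (illustrative only; the helper name and error handling are not from this patch):

    /* Illustrative user-space sketch, not part of the patch. */
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/mii.h>
    #include <linux/sockios.h>

    static int example_read_mii_reg(const char *ifname, int reg,
                                    unsigned short *val)
    {
            struct ifreq ifr;
            struct mii_ioctl_data *mii =
                    (struct mii_ioctl_data *)&ifr.ifr_data;
            int ret = -1;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return -1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

            /* SIOCGMIIPHY fills in mii->phy_id for the default PHY */
            if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
                    mii->reg_num = reg;     /* e.g. MII_BMSR or MII_LPA */
                    if (ioctl(fd, SIOCGMIIREG, &ifr) == 0) {
                            *val = mii->val_out;
                            ret = 0;
                    }
            }
            close(fd);
            return ret;
    }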
@@ -3593,6 +4062,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
                        ew32(CTRL_EXT, ctrl_ext);
                }
 
+               if (adapter->flags & FLAG_IS_ICH)
+                       e1000e_disable_gig_wol_ich8lan(&adapter->hw);
+
                /* Allow time for pending master requests to run */
                e1000e_disable_pcie_master(&adapter->hw);
 
@@ -3665,7 +4137,8 @@ static int e1000_resume(struct pci_dev *pdev)
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        e1000e_disable_l1aspm(pdev);
-       err = pci_enable_device(pdev);
+
+       err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "Cannot enable PCI device from suspend\n");
@@ -3699,7 +4172,7 @@ static int e1000_resume(struct pci_dev *pdev)
         * is up.  For all other cases, let the f/w know that the h/w is now
         * under the control of the driver.
         */
-       if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw))
+       if (!(adapter->flags & FLAG_HAS_AMT))
                e1000_get_hw_control(adapter);
 
        return 0;
@@ -3724,8 +4197,6 @@ static void e1000_netpoll(struct net_device *netdev)
        disable_irq(adapter->pdev->irq);
        e1000_intr(adapter->pdev->irq, netdev);
 
-       e1000_clean_tx_irq(adapter);
-
        enable_irq(adapter->pdev->irq);
 }
 #endif
@@ -3766,14 +4237,17 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       int err;
 
        e1000e_disable_l1aspm(pdev);
-       if (pci_enable_device(pdev)) {
+       err = pci_enable_device_mem(pdev);
+       if (err) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
+       pci_restore_state(pdev);
 
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -3814,8 +4288,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
         * is up.  For all other cases, let the f/w know that the h/w is now
         * under the control of the driver.
         */
-       if (!(adapter->flags & FLAG_HAS_AMT) ||
-           !e1000e_check_mng_mode(&adapter->hw))
+       if (!(adapter->flags & FLAG_HAS_AMT))
                e1000_get_hw_control(adapter);
 
 }
@@ -3827,22 +4300,41 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
        u32 pba_num;
 
        /* print bus type/speed/width info */
-       ndev_info(netdev, "(PCI Express:2.5GB/s:%s) "
-                 "%02x:%02x:%02x:%02x:%02x:%02x\n",
-                 /* bus width */
-                ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
-                 "Width x1"),
-                 /* MAC address */
-                 netdev->dev_addr[0], netdev->dev_addr[1],
-                 netdev->dev_addr[2], netdev->dev_addr[3],
-                 netdev->dev_addr[4], netdev->dev_addr[5]);
-       ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n",
-                 (hw->phy.type == e1000_phy_ife)
-                  ? "10/100" : "1000");
+       e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
+              /* bus width */
+              ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+               "Width x1"),
+              /* MAC address */
+              netdev->dev_addr[0], netdev->dev_addr[1],
+              netdev->dev_addr[2], netdev->dev_addr[3],
+              netdev->dev_addr[4], netdev->dev_addr[5]);
+       e_info("Intel(R) PRO/%s Network Connection\n",
+              (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
        e1000e_read_pba_num(hw, &pba_num);
-       ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
-                 hw->mac.type, hw->phy.type,
-                 (pba_num >> 8), (pba_num & 0xff));
+       e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
+              hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
+}
+
+static void e1000_eeprom_checks(struct e1000_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       int ret_val;
+       u16 buf = 0;
+
+       if (hw->mac.type != e1000_82573)
+               return;
+
+       ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
+       if (!(le16_to_cpu(buf) & (1 << 0))) {
+               /* Deep Smart Power Down (DSPD) */
+               e_warn("Warning: detected DSPD enabled in EEPROM\n");
+       }
+
+       ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
+       if (le16_to_cpu(buf) & (3 << 2)) {
+               /* ASPM enable */
+               e_warn("Warning: detected ASPM enabled in EEPROM\n");
+       }
 }
 
 /**
@@ -3863,8 +4355,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        struct e1000_adapter *adapter;
        struct e1000_hw *hw;
        const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
-       unsigned long mmio_start, mmio_len;
-       unsigned long flash_start, flash_len;
+       resource_size_t mmio_start, mmio_len;
+       resource_size_t flash_start, flash_len;
 
        static int cards_found;
        int i, err, pci_using_dac;
@@ -3872,7 +4364,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
 
        e1000e_disable_l1aspm(pdev);
-       err = pci_enable_device(pdev);
+
+       err = pci_enable_device_mem(pdev);
        if (err)
                return err;
 
@@ -3895,11 +4388,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                }
        }
 
-       err = pci_request_regions(pdev, e1000e_driver_name);
+       err = pci_request_selected_regions(pdev,
+                                         pci_select_bars(pdev, IORESOURCE_MEM),
+                                         e1000e_driver_name);
        if (err)
                goto err_pci_reg;
 
        pci_set_master(pdev);
+       pci_save_state(pdev);
 
        err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
@@ -3963,6 +4459,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        adapter->bd_number = cards_found++;
 
+       e1000e_check_options(adapter);
+
        /* setup adapter struct */
        err = e1000_sw_init(adapter);
        if (err)
@@ -3978,6 +4476,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        if (err)
                goto err_hw_init;
 
+       if ((adapter->flags & FLAG_IS_ICH) &&
+           (adapter->flags & FLAG_READ_ONLY_NVM))
+               e1000e_write_protect_nvm_ich8lan(&adapter->hw);
+
        hw->mac.ops.get_bus_info(&adapter->hw);
 
        adapter->hw.phy.autoneg_wait_to_complete = 0;
@@ -3990,8 +4492,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        }
 
        if (e1000_check_reset_block(&adapter->hw))
-               ndev_info(netdev,
-                         "PHY reset is blocked due to SOL/IDER session.\n");
+               e_info("PHY reset is blocked due to SOL/IDER session.\n");
 
        netdev->features = NETIF_F_SG |
                           NETIF_F_HW_CSUM |
@@ -4004,6 +4505,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;
 
+       netdev->vlan_features |= NETIF_F_TSO;
+       netdev->vlan_features |= NETIF_F_TSO6;
+       netdev->vlan_features |= NETIF_F_HW_CSUM;
+       netdev->vlan_features |= NETIF_F_SG;
+
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
@@ -4030,25 +4536,26 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
                        break;
                if (i == 2) {
-                       ndev_err(netdev, "The NVM Checksum Is Not Valid\n");
+                       e_err("The NVM Checksum Is Not Valid\n");
                        err = -EIO;
                        goto err_eeprom;
                }
        }
 
+       e1000_eeprom_checks(adapter);
+
        /* copy the MAC address out of the NVM */
        if (e1000e_read_mac_addr(&adapter->hw))
-               ndev_err(netdev, "NVM Read Error while reading MAC address\n");
+               e_err("NVM Read Error while reading MAC address\n");
 
        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
        memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
 
        if (!is_valid_ether_addr(netdev->perm_addr)) {
-               ndev_err(netdev, "Invalid MAC Address: "
-                        "%02x:%02x:%02x:%02x:%02x:%02x\n",
-                        netdev->perm_addr[0], netdev->perm_addr[1],
-                        netdev->perm_addr[2], netdev->perm_addr[3],
-                        netdev->perm_addr[4], netdev->perm_addr[5]);
+               e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+                     netdev->perm_addr[0], netdev->perm_addr[1],
+                     netdev->perm_addr[2], netdev->perm_addr[3],
+                     netdev->perm_addr[4], netdev->perm_addr[5]);
                err = -EIO;
                goto err_eeprom;
        }
@@ -4063,8 +4570,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        INIT_WORK(&adapter->reset_task, e1000_reset_task);
        INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
-
-       e1000e_check_options(adapter);
+       INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
+       INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
 
        /* Initialize link parameters. User can change them with ethtool */
        adapter->hw.mac.autoneg = 1;
@@ -4118,13 +4625,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
         * is up.  For all other cases, let the f/w know that the h/w is now
         * under the control of the driver.
         */
-       if (!(adapter->flags & FLAG_HAS_AMT) ||
-           !e1000e_check_mng_mode(&adapter->hw))
+       if (!(adapter->flags & FLAG_HAS_AMT))
                e1000_get_hw_control(adapter);
 
        /* tell the stack to leave us alone until e1000_open() is called */
        netif_carrier_off(netdev);
-       netif_stop_queue(netdev);
+       netif_tx_stop_all_queues(netdev);
 
        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
@@ -4136,24 +4642,25 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        return 0;
 
 err_register:
-err_hw_init:
-       e1000_release_hw_control(adapter);
+       if (!(adapter->flags & FLAG_HAS_AMT))
+               e1000_release_hw_control(adapter);
 err_eeprom:
        if (!e1000_check_reset_block(&adapter->hw))
                e1000_phy_hw_reset(&adapter->hw);
+err_hw_init:
 
-       if (adapter->hw.flash_address)
-               iounmap(adapter->hw.flash_address);
-
-err_flashmap:
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
 err_sw_init:
+       if (adapter->hw.flash_address)
+               iounmap(adapter->hw.flash_address);
+err_flashmap:
        iounmap(adapter->hw.hw_addr);
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
-       pci_release_regions(pdev);
+       pci_release_selected_regions(pdev,
+                                    pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
        pci_disable_device(pdev);
@@ -4201,7 +4708,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
        iounmap(adapter->hw.hw_addr);
        if (adapter->hw.flash_address)
                iounmap(adapter->hw.flash_address);
-       pci_release_regions(pdev);
+       pci_release_selected_regions(pdev,
+                                    pci_select_bars(pdev, IORESOURCE_MEM));
 
        free_netdev(netdev);
 
@@ -4257,6 +4765,13 @@ static struct pci_device_id e1000_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
+
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
 
        { }     /* terminate list */
 };
@@ -4291,7 +4806,9 @@ static int __init e1000_init_module(void)
        printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
               e1000e_driver_name);
        ret = pci_register_driver(&e1000_driver);
-
+       pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
+                              PM_QOS_DEFAULT_VALUE);
+
        return ret;
 }
 module_init(e1000_init_module);
@@ -4305,6 +4822,7 @@ module_init(e1000_init_module);
 static void __exit e1000_exit_module(void)
 {
        pci_unregister_driver(&e1000_driver);
+       pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
 }
 module_exit(e1000_exit_module);