e1000e: don't generate bad checksums for tcp packets with 0 csum
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 88341bf..ca17af4 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
-       "Intel(R) 10 Gigabit PCI Express Network Driver";
+                              "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "1.1.18"
+#define DRV_VERSION "1.3.30-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
-static const char ixgbe_copyright[] =
-        "Copyright (c) 1999-2007 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
-       [board_82598AF]                 = &ixgbe_82598AF_info,
-       [board_82598EB]                 = &ixgbe_82598EB_info,
-       [board_82598AT]                 = &ixgbe_82598AT_info,
+       [board_82598] = &ixgbe_82598_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -69,19 +65,31 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
  */
 static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
-        board_82598AF },
+        board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
-        board_82598AF },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT),
-        board_82598AT },
+        board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
-        board_82598EB },
+        board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
+        board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
+        board_82598 },
 
        /* required last entry */
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
+                            void *p);
+static struct notifier_block dca_notifier = {
+       .notifier_call = ixgbe_notify_dca,
+       .next          = NULL,
+       .priority      = 0
+};
+#endif
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
@@ -89,22 +97,28 @@ MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
+{
+       u32 ctrl_ext;
 
-#ifdef DEBUG
-/**
- * ixgbe_get_hw_dev_name - return device name string
- * used by hardware layer to print debugging information
- **/
-char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+       /* Let firmware take over control of h/w */
+       ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+                       ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+}
+
+static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
 {
-       struct ixgbe_adapter *adapter = hw->back;
-       struct net_device *netdev = adapter->netdev;
-       return netdev->name;
+       u32 ctrl_ext;
+
+       /* Let firmware know the driver has taken over */
+       ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+                       ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
 }
-#endif
 
 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
-                          u8 msix_vector)
+                           u8 msix_vector)
 {
        u32 ivar, index;
 
@@ -117,13 +131,12 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
 }
 
 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-                                            struct ixgbe_tx_buffer
-                                            *tx_buffer_info)
+                                             struct ixgbe_tx_buffer
+                                             *tx_buffer_info)
 {
        if (tx_buffer_info->dma) {
-               pci_unmap_page(adapter->pdev,
-                              tx_buffer_info->dma,
-                              tx_buffer_info->length, PCI_DMA_TODEVICE);
+               pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
+                              tx_buffer_info->length, PCI_DMA_TODEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
@@ -134,148 +147,315 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 }
 
 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
-                                      struct ixgbe_ring *tx_ring,
-                                      unsigned int eop,
-                                      union ixgbe_adv_tx_desc *eop_desc)
+                                       struct ixgbe_ring *tx_ring,
+                                       unsigned int eop)
 {
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 head, tail;
+
        /* Detect a transmit hang in hardware, this serializes the
-        * check with the clearing of time_stamp and movement of i */
+        * check with the clearing of time_stamp and movement of eop */
+       head = IXGBE_READ_REG(hw, tx_ring->head);
+       tail = IXGBE_READ_REG(hw, tx_ring->tail);
        adapter->detect_tx_hung = false;
-       if (tx_ring->tx_buffer_info[eop].dma &&
+       if ((head != tail) &&
+           tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                /* detected Tx unit hang */
+               union ixgbe_adv_tx_desc *tx_desc;
+               tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
                DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
-                       "  TDH                  <%x>\n"
-                       "  TDT                  <%x>\n"
+                       "  Tx Queue             <%d>\n"
+                       "  TDH, TDT             <%x>, <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
-                       "  next_to_watch        <%x>\n"
-                       "  jiffies              <%lx>\n"
-                       "  next_to_watch.status <%x>\n",
-                       readl(adapter->hw.hw_addr + tx_ring->head),
-                       readl(adapter->hw.hw_addr + tx_ring->tail),
-                       tx_ring->next_to_use,
-                       tx_ring->next_to_clean,
-                       tx_ring->tx_buffer_info[eop].time_stamp,
-                       eop, jiffies, eop_desc->wb.status);
+                       "  jiffies              <%lx>\n",
+                       tx_ring->queue_index,
+                       head, tail,
+                       tx_ring->next_to_use, eop,
+                       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
        }
 
        return false;
 }
 
+#define IXGBE_MAX_TXD_PWR       14
+#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+                        (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+       MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+
+#define GET_TX_HEAD_FROM_RING(ring) (\
+       *(volatile u32 *) \
+       ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
+static void ixgbe_tx_timeout(struct net_device *netdev);
+
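
To see what these worst-case constants work out to: TXD_USE_COUNT() splits a buffer into 16 KB chunks (2^14 bytes, the largest payload one advanced Tx descriptor carries here), rounding up, and DESC_NEEDED then budgets one chunk's worth for skb->data, one descriptor per page for every possible fragment, plus one for the context descriptor. A standalone sketch of the same arithmetic, assuming 4 KB pages and an illustrative MAX_SKB_FRAGS of 18:

    #include <stdio.h>

    #define IXGBE_MAX_TXD_PWR      14
    #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)  /* 16384 bytes */
    #define PAGE_SIZE              4096                      /* assumed */
    #define MAX_SKB_FRAGS          18                        /* assumed */

    /* descriptors needed for S bytes: one per full 16 KB chunk, plus one
     * for any remainder */
    #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                              (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))

    int main(void)
    {
            printf("%d\n", TXD_USE_COUNT(16384));  /* 1: exactly one chunk */
            printf("%d\n", TXD_USE_COUNT(16385));  /* 2: one byte spills over */
            printf("%d\n", TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) +
                           MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1); /* 20 */
            return 0;
    }

Under these assumptions DESC_NEEDED is 20 descriptors, which is why the TX_WAKE_THRESHOLD below (DESC_NEEDED * 2) waits for 40 free slots before waking a stopped queue.
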
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @adapter: board private structure
+ * @tx_ring: tx ring to clean
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
-                                   struct ixgbe_ring *tx_ring)
+                               struct ixgbe_ring *tx_ring)
 {
-       struct net_device *netdev = adapter->netdev;
-       union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+       union ixgbe_adv_tx_desc *tx_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
-       unsigned int i, eop;
-       bool cleaned = false;
-       int count = 0;
+       struct net_device *netdev = adapter->netdev;
+       struct sk_buff *skb;
+       unsigned int i;
+       u32 head, oldhead;
+       unsigned int count = 0;
+       unsigned int total_bytes = 0, total_packets = 0;
 
+       rmb();
+       head = GET_TX_HEAD_FROM_RING(tx_ring);
+       head = le32_to_cpu(head);
        i = tx_ring->next_to_clean;
-       eop = tx_ring->tx_buffer_info[i].next_to_watch;
-       eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-       while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
-               for (cleaned = false; !cleaned;) {
+       while (1) {
+               while (i != head) {
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                       cleaned = (i == eop);
+                       skb = tx_buffer_info->skb;
+
+                       if (skb) {
+                               unsigned int segs, bytecount;
+
+                               /* gso_segs is currently only valid for tcp */
+                               segs = skb_shinfo(skb)->gso_segs ?: 1;
+                               /* multiply data chunks by size of headers */
+                               bytecount = ((segs - 1) * skb_headlen(skb)) +
+                                           skb->len;
+                               total_packets += segs;
+                               total_bytes += bytecount;
+                       }
 
-                       tx_ring->stats.bytes += tx_buffer_info->length;
                        ixgbe_unmap_and_free_tx_resource(adapter,
-                                                        tx_buffer_info);
-                       tx_desc->wb.status = 0;
+                                                        tx_buffer_info);
 
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
+
+                       count++;
+                       if (count == tx_ring->count)
+                               goto done_cleaning;
+               }
+               oldhead = head;
+               rmb();
+               head = GET_TX_HEAD_FROM_RING(tx_ring);
+               head = le32_to_cpu(head);
+               if (head == oldhead)
+                       goto done_cleaning;
+       } /* while (1) */
+
+done_cleaning:
+       tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+       if (unlikely(count && netif_carrier_ok(netdev) &&
+                    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+               /* Make sure that anybody stopping the queue after this
+                * sees the new next_to_clean.
+                */
+               smp_mb();
+               if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+                   !test_bit(__IXGBE_DOWN, &adapter->state)) {
+                       netif_wake_subqueue(netdev, tx_ring->queue_index);
+                       ++adapter->restart_queue;
                }
+       }
 
-               tx_ring->stats.packets++;
+       if (adapter->detect_tx_hung) {
+               if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+                       /* schedule immediate reset if we believe we hung */
+                       DPRINTK(PROBE, INFO,
+                               "tx hang %d detected, resetting adapter\n",
+                               adapter->tx_timeout_count + 1);
+                       ixgbe_tx_timeout(adapter->netdev);
+               }
+       }
 
-               eop = tx_ring->tx_buffer_info[i].next_to_watch;
-               eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+       /* re-arm the interrupt */
+       if ((total_packets >= tx_ring->work_limit) ||
+           (count == tx_ring->count))
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
+
+       tx_ring->total_bytes += total_bytes;
+       tx_ring->total_packets += total_packets;
+       tx_ring->stats.bytes += total_bytes;
+       tx_ring->stats.packets += total_packets;
+       adapter->net_stats.tx_bytes += total_bytes;
+       adapter->net_stats.tx_packets += total_packets;
+       return (total_packets ? true : false);
+}
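
A note on the head write-back scheme used above: rather than reading the TDH register across the PCIe bus on every cleanup pass, the hardware is set up to DMA its current head index into host memory just past the last descriptor, and GET_TX_HEAD_FROM_RING() reads that word. A minimal sketch of the same read, assuming the write-back address was programmed at ring init:

    /* sketch: read the head index the hardware DMA-writes (a little-endian
     * u32) into the word immediately after the last Tx descriptor */
    static u32 example_read_tx_head(struct ixgbe_ring *ring)
    {
            volatile u32 *head_wb = (volatile u32 *)
                    ((union ixgbe_adv_tx_desc *)ring->desc + ring->count);

            rmb();                  /* don't reorder ahead of earlier reads */
            return le32_to_cpu(*head_wb);
    }
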
 
-               /* weight of a sort for tx, avoid endless transmit cleanup */
-               if (count++ >= tx_ring->work_limit)
-                       break;
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
+                                struct ixgbe_ring *rx_ring)
+{
+       u32 rxctrl;
+       int cpu = get_cpu();
+       int q = rx_ring - adapter->rx_ring;
+
+       if (rx_ring->cpu != cpu) {
+               rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
+               rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+               rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+               rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
+               rx_ring->cpu = cpu;
        }
+       put_cpu();
+}
 
-       tx_ring->next_to_clean = i;
+static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
+                                struct ixgbe_ring *tx_ring)
+{
+       u32 txctrl;
+       int cpu = get_cpu();
+       int q = tx_ring - adapter->tx_ring;
+
+       if (tx_ring->cpu != cpu) {
+               txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
+               txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+               txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
+               tx_ring->cpu = cpu;
+       }
+       put_cpu();
+}
 
-#define TX_WAKE_THRESHOLD 32
-       spin_lock(&tx_ring->tx_lock);
+static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
+{
+       int i;
 
-       if (cleaned && netif_carrier_ok(netdev) &&
-           (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) &&
-           !test_bit(__IXGBE_DOWN, &adapter->state))
-               netif_wake_queue(netdev);
+       if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+               return;
 
-       spin_unlock(&tx_ring->tx_lock);
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               adapter->tx_ring[i].cpu = -1;
+               ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+       }
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               adapter->rx_ring[i].cpu = -1;
+               ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+       }
+}
 
-       if (adapter->detect_tx_hung)
-               if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
-                       netif_stop_queue(netdev);
+static int __ixgbe_notify_dca(struct device *dev, void *data)
+{
+       struct net_device *netdev = dev_get_drvdata(dev);
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       unsigned long event = *(unsigned long *)data;
 
-       if (count >= tx_ring->work_limit)
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
+       switch (event) {
+       case DCA_PROVIDER_ADD:
+               /* if we're already enabled, don't do it again */
+               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+                       break;
+               /* Always use CB2 mode, difference is masked
+                * in the CB driver. */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+               if (dca_add_requester(dev) == 0) {
+                       adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+                       ixgbe_setup_dca(adapter);
+                       break;
+               }
+               /* Fall Through since DCA is disabled. */
+       case DCA_PROVIDER_REMOVE:
+               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+                       dca_remove_requester(dev);
+                       adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+               }
+               break;
+       }
 
-       return cleaned;
+       return 0;
 }
 
+#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
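
For context, the dca_notifier declared near the top of the file only takes effect once it is registered with the DCA core; a rough sketch of that wiring, which lives in the module init/exit paths rather than in this hunk (the fan-out through driver_for_each_device() is how each bound netdev's __ixgbe_notify_dca() gets called):

    static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                                void *p)
    {
            /* deliver DCA_PROVIDER_ADD/REMOVE to every device bound to
             * this driver; __ixgbe_notify_dca() handles each one */
            int ret = driver_for_each_device(&ixgbe_driver.driver, NULL,
                                             &event, __ixgbe_notify_dca);

            return ret ? NOTIFY_BAD : NOTIFY_DONE;
    }

    /* module init:  dca_register_notify(&dca_notifier);   */
    /* module exit:  dca_unregister_notify(&dca_notifier); */
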
 /**
  * ixgbe_receive_skb - Send a completed packet up the stack
  * @adapter: board private structure
  * @skb: packet to send up
- * @is_vlan: packet has a VLAN tag
- * @tag: VLAN tag from descriptor
+ * @status: hardware indication of status of receive
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_desc: rx descriptor
  **/
 static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
-                             struct sk_buff *skb, bool is_vlan,
-                             u16 tag)
+                              struct sk_buff *skb, u8 status,
+                              struct ixgbe_ring *ring,
+                              union ixgbe_adv_rx_desc *rx_desc)
 {
-       if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
-               if (adapter->vlgrp && is_vlan)
-                       vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
-               else
-                       netif_receive_skb(skb);
-       } else {
+       bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+       u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
+       if (adapter->netdev->features & NETIF_F_LRO &&
+           skb->ip_summed == CHECKSUM_UNNECESSARY) {
                if (adapter->vlgrp && is_vlan)
-                       vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+                       lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
+                                                    adapter->vlgrp, tag,
+                                                    rx_desc);
                else
-                       netif_rx(skb);
+                       lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
+               ring->lro_used = true;
+       } else {
+               if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+                       if (adapter->vlgrp && is_vlan)
+                               vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
+                       else
+                               netif_receive_skb(skb);
+               } else {
+                       if (adapter->vlgrp && is_vlan)
+                               vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+                       else
+                               netif_rx(skb);
+               }
        }
 }
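
The per-ring lro_mgr consumed above has to be configured when the Rx ring is brought up; a rough sketch of that setup against the inet_lro API (the callback name, descriptor count, and aggregation limit here are illustrative, not the driver's values):

    #include <linux/inet_lro.h>
    #include <linux/slab.h>

    /* illustrative callback: it must locate the IP/TCP headers for the
     * LRO engine and reject anything that cannot be aggregated */
    static int example_get_skb_hdr(struct sk_buff *skb, void **iphdr,
                                   void **tcph, u64 *hdr_flags, void *priv);

    static void example_setup_lro(struct ixgbe_ring *ring,
                                  struct net_device *netdev)
    {
            ring->lro_mgr.dev = netdev;
            ring->lro_mgr.features = LRO_F_EXTRACT_VLAN_ID | LRO_F_NAPI;
            ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
            ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
            ring->lro_mgr.max_desc = 8;                  /* illustrative */
            ring->lro_mgr.max_aggr = 32;                 /* illustrative */
            ring->lro_mgr.lro_arr = kcalloc(ring->lro_mgr.max_desc,
                                            sizeof(struct net_lro_desc),
                                            GFP_KERNEL);
            ring->lro_mgr.get_skb_header = example_get_skb_hdr;
    }
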
 
+/**
+ * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
-                                        u32 status_err,
-                                        struct sk_buff *skb)
+                                     u32 status_err, struct sk_buff *skb)
 {
        skb->ip_summed = CHECKSUM_NONE;
 
-       /* Ignore Checksum bit is set */
-       if ((status_err & IXGBE_RXD_STAT_IXSM) ||
-                    !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+       /* Rx csum disabled */
+       if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+               return;
+
+       /* if IP and error */
+       if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+           (status_err & IXGBE_RXDADV_ERR_IPE)) {
+               adapter->hw_csum_rx_error++;
+               return;
+       }
+
+       if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;
-       /* TCP/UDP checksum error bit is set */
-       if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) {
-               /* let the stack verify checksum errors */
+
+       if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                adapter->hw_csum_rx_error++;
                return;
        }
+
        /* It must be a TCP or UDP packet with a valid checksum */
-       if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS))
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
 }
 
@@ -284,37 +464,44 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
  * @adapter: address of board private structure
  **/
 static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-                                      struct ixgbe_ring *rx_ring,
-                                      int cleaned_count)
+                                   struct ixgbe_ring *rx_ring,
+                                   int cleaned_count)
 {
-       struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
-       struct ixgbe_rx_buffer *rx_buffer_info;
-       struct sk_buff *skb;
+       struct ixgbe_rx_buffer *bi;
        unsigned int i;
-       unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;
+       unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
 
        i = rx_ring->next_to_use;
-       rx_buffer_info = &rx_ring->rx_buffer_info[i];
+       bi = &rx_ring->rx_buffer_info[i];
 
        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
 
-               if (!rx_buffer_info->page &&
-                               (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
-                       rx_buffer_info->page = alloc_page(GFP_ATOMIC);
-                       if (!rx_buffer_info->page) {
-                               adapter->alloc_rx_page_failed++;
-                               goto no_buffers;
+               if (!bi->page_dma &&
+                   (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+                       if (!bi->page) {
+                               bi->page = alloc_page(GFP_ATOMIC);
+                               if (!bi->page) {
+                                       adapter->alloc_rx_page_failed++;
+                                       goto no_buffers;
+                               }
+                               bi->page_offset = 0;
+                       } else {
+                               /* use a half page if we're re-using */
+                               bi->page_offset ^= (PAGE_SIZE / 2);
                        }
-                       rx_buffer_info->page_dma =
-                           pci_map_page(pdev, rx_buffer_info->page,
-                                        0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+                       bi->page_dma = pci_map_page(pdev, bi->page,
+                                                   bi->page_offset,
+                                                   (PAGE_SIZE / 2),
+                                                   PCI_DMA_FROMDEVICE);
                }
 
-               if (!rx_buffer_info->skb) {
-                       skb = netdev_alloc_skb(netdev, bufsz);
+               if (!bi->skb) {
+                       struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
+                                                              bufsz);
 
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
@@ -328,28 +515,25 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                         */
                        skb_reserve(skb, NET_IP_ALIGN);
 
-                       rx_buffer_info->skb = skb;
-                       rx_buffer_info->dma = pci_map_single(pdev, skb->data,
-                                                         bufsz,
-                                                         PCI_DMA_FROMDEVICE);
+                       bi->skb = skb;
+                       bi->dma = pci_map_single(pdev, skb->data, bufsz,
+                                                PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-                       rx_desc->read.pkt_addr =
-                           cpu_to_le64(rx_buffer_info->page_dma);
-                       rx_desc->read.hdr_addr =
-                                       cpu_to_le64(rx_buffer_info->dma);
+                       rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+                       rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
-                       rx_desc->read.pkt_addr =
-                                       cpu_to_le64(rx_buffer_info->dma);
+                       rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                }
 
                i++;
                if (i == rx_ring->count)
                        i = 0;
-               rx_buffer_info = &rx_ring->rx_buffer_info[i];
+               bi = &rx_ring->rx_buffer_info[i];
        }
+
 no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
@@ -367,47 +551,54 @@ no_buffers:
        }
 }
 
+static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+       return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+       return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
 static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
-                              struct ixgbe_ring *rx_ring,
-                              int *work_done, int work_to_do)
+                               struct ixgbe_ring *rx_ring,
+                               int *work_done, int work_to_do)
 {
-       struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
-       u32 upper_len, len, staterr;
-       u16 hdr_info, vlan_tag;
-       bool is_vlan, cleaned = false;
+       u32 len, staterr;
+       u16 hdr_info;
+       bool cleaned = false;
        int cleaned_count = 0;
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
        i = rx_ring->next_to_clean;
-       upper_len = 0;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];
-       is_vlan = (staterr & IXGBE_RXD_STAT_VP);
-       vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
        while (staterr & IXGBE_RXD_STAT_DD) {
+               u32 upper_len = 0;
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
 
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-                       hdr_info =
-                           le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
-                       len =
-                           ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-                            IXGBE_RXDADV_HDRBUFLEN_SHIFT);
+                       hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
+                       len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+                              IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-               } else
+               } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
+               }
 
                cleaned = true;
                skb = rx_buffer_info->skb;
@@ -416,18 +607,25 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 
                if (len && !skb_shinfo(skb)->nr_frags) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
-                                        adapter->rx_buf_len + NET_IP_ALIGN,
-                                        PCI_DMA_FROMDEVICE);
+                                        rx_ring->rx_buf_len + NET_IP_ALIGN,
+                                        PCI_DMA_FROMDEVICE);
                        skb_put(skb, len);
                }
 
                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
-                                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                                      PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                                          rx_buffer_info->page, 0, upper_len);
-                       rx_buffer_info->page = NULL;
+                                          rx_buffer_info->page,
+                                          rx_buffer_info->page_offset,
+                                          upper_len);
+
+                       if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+                           (page_count(rx_buffer_info->page) != 1))
+                               rx_buffer_info->page = NULL;
+                       else
+                               get_page(rx_buffer_info->page);
 
                        skb->len += upper_len;
                        skb->data_len += upper_len;
@@ -450,6 +648,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                        rx_buffer_info->skb = next_buffer->skb;
                        rx_buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
+                       next_buffer->dma = 0;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }
@@ -460,9 +659,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                }
 
                ixgbe_rx_checksum(adapter, staterr, skb);
-               skb->protocol = eth_type_trans(skb, netdev);
-               ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
-               netdev->last_rx = jiffies;
+
+               /* probably a little skewed due to removing CRC */
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
+
+               skb->protocol = eth_type_trans(skb, adapter->netdev);
+               ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
+               adapter->netdev->last_rx = jiffies;
 
 next_desc:
                rx_desc->wb.upper.status_error = 0;
@@ -478,8 +682,11 @@ next_desc:
                rx_buffer_info = next_buffer;
 
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-               is_vlan = (staterr & IXGBE_RXD_STAT_VP);
-               vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+       }
+
+       if (rx_ring->lro_used) {
+               lro_flush_all(&rx_ring->lro_mgr);
+               rx_ring->lro_used = false;
        }
 
        rx_ring->next_to_clean = i;
@@ -488,10 +695,15 @@ next_desc:
        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 
+       rx_ring->total_packets += total_rx_packets;
+       rx_ring->total_bytes += total_rx_bytes;
+       adapter->net_stats.rx_bytes += total_rx_bytes;
+       adapter->net_stats.rx_packets += total_rx_packets;
+
        return cleaned;
 }
 
-#define IXGBE_MAX_INTR 10
+static int ixgbe_clean_rxonly(struct napi_struct *, int);
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -501,28 +713,209 @@ next_desc:
  **/
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
-       int i, vector = 0;
+       struct ixgbe_q_vector *q_vector;
+       int i, j, q_vectors, v_idx, r_idx;
+       u32 mask;
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i),
-                              IXGBE_MSIX_VECTOR(vector));
-               writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr),
-                      adapter->hw.hw_addr + adapter->tx_ring[i].itr_register);
-               vector++;
+       q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+       /* Populate the IVAR table and set the ITR values to the
+        * corresponding register.
+        */
+       for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+               q_vector = &adapter->q_vector[v_idx];
+               /* XXX for_each_bit(...) */
+               r_idx = find_first_bit(q_vector->rxr_idx,
+                                      adapter->num_rx_queues);
+
+               for (i = 0; i < q_vector->rxr_count; i++) {
+                       j = adapter->rx_ring[r_idx].reg_idx;
+                       ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
+                       r_idx = find_next_bit(q_vector->rxr_idx,
+                                             adapter->num_rx_queues,
+                                             r_idx + 1);
+               }
+               r_idx = find_first_bit(q_vector->txr_idx,
+                                      adapter->num_tx_queues);
+
+               for (i = 0; i < q_vector->txr_count; i++) {
+                       j = adapter->tx_ring[r_idx].reg_idx;
+                       ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
+                       r_idx = find_next_bit(q_vector->txr_idx,
+                                             adapter->num_tx_queues,
+                                             r_idx + 1);
+               }
+
+               /* if this is a tx only vector halve the interrupt rate */
+               if (q_vector->txr_count && !q_vector->rxr_count)
+                       q_vector->eitr = (adapter->eitr_param >> 1);
+               else
+                       /* rx only */
+                       q_vector->eitr = adapter->eitr_param;
+
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
+                               EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
        }
 
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i),
-                              IXGBE_MSIX_VECTOR(vector));
-               writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr),
-                      adapter->hw.hw_addr + adapter->rx_ring[i].itr_register);
-               vector++;
+       ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
+
+       /* set up to autoclear timer, and the vectors */
+       mask = IXGBE_EIMS_ENABLE_MASK;
+       mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
+}
+
+enum latency_range {
+       lowest_latency = 0,
+       low_latency = 1,
+       bulk_latency = 2,
+       latency_invalid = 255
+};
+
+/**
+ * ixgbe_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @eitr: eitr setting (ints per sec) to give last timeslice
+ * @itr_setting: current throttle rate in ints/second
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ *      this functionality is controlled by the InterruptThrottleRate module
+ *      parameter (see ixgbe_param.c)
+ **/
+static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
+                           u32 eitr, u8 itr_setting,
+                           int packets, int bytes)
+{
+       unsigned int retval = itr_setting;
+       u32 timepassed_us;
+       u64 bytes_perint;
+
+       if (packets == 0)
+               goto update_itr_done;
+
+
+       /* simple throttlerate management
+        *    0-20MB/s lowest (100000 ints/s)
+        *   20-100MB/s low   (20000 ints/s)
+        *  100-1249MB/s bulk (8000 ints/s)
+        */
+       /* what was last interrupt timeslice? */
+       timepassed_us = 1000000/eitr;
+       bytes_perint = bytes / timepassed_us; /* bytes/usec */
+
+       switch (itr_setting) {
+       case lowest_latency:
+               if (bytes_perint > adapter->eitr_low)
+                       retval = low_latency;
+               break;
+       case low_latency:
+               if (bytes_perint > adapter->eitr_high)
+                       retval = bulk_latency;
+               else if (bytes_perint <= adapter->eitr_low)
+                       retval = lowest_latency;
+               break;
+       case bulk_latency:
+               if (bytes_perint <= adapter->eitr_high)
+                       retval = low_latency;
+               break;
+       }
+
+update_itr_done:
+       return retval;
+}
+
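
To make the classifier above concrete: at eitr = 20000 ints/s the last timeslice was 1000000/20000 = 50 us, so 3000 bytes observed in that slice gives bytes_perint = 60, and whether that moves the vector between latency classes depends on the adapter's eitr_low/eitr_high cutoffs. A standalone sketch with assumed thresholds (the driver reads its real cutoffs from adapter->eitr_low and adapter->eitr_high):

    #include <stdio.h>

    enum latency_range { lowest_latency, low_latency, bulk_latency };

    /* same shape as ixgbe_update_itr(); eitr_low/eitr_high are assumed
     * values in bytes per usec, not the driver's defaults */
    static int update_itr(unsigned int eitr, int itr,
                          unsigned int packets, unsigned int bytes)
    {
            const unsigned int eitr_low = 10, eitr_high = 20;
            unsigned int bytes_perint;

            if (packets == 0)
                    return itr;
            bytes_perint = bytes / (1000000 / eitr); /* bytes per usec */

            switch (itr) {
            case lowest_latency:
                    if (bytes_perint > eitr_low)
                            return low_latency;
                    break;
            case low_latency:
                    if (bytes_perint > eitr_high)
                            return bulk_latency;
                    if (bytes_perint <= eitr_low)
                            return lowest_latency;
                    break;
            case bulk_latency:
                    if (bytes_perint <= eitr_high)
                            return low_latency;
                    break;
            }
            return itr;
    }

    int main(void)
    {
            /* 3000 bytes in a 50 us slice -> 60 bytes/usec -> bulk */
            printf("%d\n", update_itr(20000, low_latency, 10, 3000)); /* 2 */
            return 0;
    }

ixgbe_set_itr_msix() below then maps the class back to a rate (100000, 20000, or 8000 ints/s) and eases toward it with a 90/10 exponential smoothing step, so one bursty interval cannot whipsaw the interrupt rate.
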
+static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
+{
+       struct ixgbe_adapter *adapter = q_vector->adapter;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 new_itr;
+       u8 current_itr, ret_itr;
+       int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
+                              sizeof(struct ixgbe_q_vector);
+       struct ixgbe_ring *rx_ring, *tx_ring;
+
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       for (i = 0; i < q_vector->txr_count; i++) {
+               tx_ring = &(adapter->tx_ring[r_idx]);
+               ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
+                                          q_vector->tx_itr,
+                                          tx_ring->total_packets,
+                                          tx_ring->total_bytes);
+               /* if the result for this queue would decrease interrupt
+                * rate for this vector then use that result */
+               q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+                                   q_vector->tx_itr - 1 : ret_itr);
+               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+                                     r_idx + 1);
+       }
+
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       for (i = 0; i < q_vector->rxr_count; i++) {
+               rx_ring = &(adapter->rx_ring[r_idx]);
+               ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
+                                          q_vector->rx_itr,
+                                          rx_ring->total_packets,
+                                          rx_ring->total_bytes);
+               /* if the result for this queue would decrease interrupt
+                * rate for this vector then use that result */
+               q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+                                   q_vector->rx_itr - 1 : ret_itr);
+               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+                                     r_idx + 1);
+       }
+
+       current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+       switch (current_itr) {
+       /* counts and packets in update_itr are dependent on these numbers */
+       case lowest_latency:
+               new_itr = 100000;
+               break;
+       case low_latency:
+               new_itr = 20000; /* aka hwitr = ~200 */
+               break;
+       case bulk_latency:
+       default:
+               new_itr = 8000;
+               break;
+       }
+
+       if (new_itr != q_vector->eitr) {
+               u32 itr_reg;
+               /* do an exponential smoothing */
+               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+               q_vector->eitr = new_itr;
+               itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
+               /* must write high and low 16 bits to reset counter */
+               DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
+                       itr_reg);
+               IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
        }
 
-       vector = adapter->num_tx_queues + adapter->num_rx_queues;
-       ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-                      IXGBE_MSIX_VECTOR(vector));
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950);
+       return;
+}
+
+
+static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       adapter->lsc_int++;
+       adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+       adapter->link_check_timeout = jiffies;
+       if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+               IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
+               schedule_work(&adapter->watchdog_task);
+       }
 }
 
 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
@@ -532,165 +925,372 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 
-       if (eicr & IXGBE_EICR_LSC) {
-               adapter->lsc_int++;
-               if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       mod_timer(&adapter->watchdog_timer, jiffies);
-       }
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+       if (eicr & IXGBE_EICR_LSC)
+               ixgbe_check_lsc(adapter);
+
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
        return IRQ_HANDLED;
 }
 
 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 {
-       struct ixgbe_ring *txr = data;
-       struct ixgbe_adapter *adapter = txr->adapter;
-
-       ixgbe_clean_tx_irq(adapter, txr);
+       struct ixgbe_q_vector *q_vector = data;
+       struct ixgbe_adapter  *adapter = q_vector->adapter;
+       struct ixgbe_ring     *tx_ring;
+       int i, r_idx;
+
+       if (!q_vector->txr_count)
+               return IRQ_HANDLED;
+
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       for (i = 0; i < q_vector->txr_count; i++) {
+               tx_ring = &(adapter->tx_ring[r_idx]);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+                       ixgbe_update_tx_dca(adapter, tx_ring);
+#endif
+               tx_ring->total_bytes = 0;
+               tx_ring->total_packets = 0;
+               ixgbe_clean_tx_irq(adapter, tx_ring);
+               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+                                     r_idx + 1);
+       }
 
        return IRQ_HANDLED;
 }
 
+/**
+ * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
 static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 {
-       struct ixgbe_ring *rxr = data;
-       struct ixgbe_adapter *adapter = rxr->adapter;
+       struct ixgbe_q_vector *q_vector = data;
+       struct ixgbe_adapter  *adapter = q_vector->adapter;
+       struct ixgbe_ring  *rx_ring;
+       int r_idx;
+       int i;
+
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       for (i = 0;  i < q_vector->rxr_count; i++) {
+               rx_ring = &(adapter->rx_ring[r_idx]);
+               rx_ring->total_bytes = 0;
+               rx_ring->total_packets = 0;
+               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+                                     r_idx + 1);
+       }
+
+       if (!q_vector->rxr_count)
+               return IRQ_HANDLED;
+
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       rx_ring = &(adapter->rx_ring[r_idx]);
+       /* disable interrupts on this vector only */
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
+       netif_rx_schedule(adapter->netdev, &q_vector->napi);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
+{
+       ixgbe_msix_clean_rx(irq, data);
+       ixgbe_msix_clean_tx(irq, data);
 
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value);
-       netif_rx_schedule(adapter->netdev, &adapter->napi);
        return IRQ_HANDLED;
 }
 
+/**
+ * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 {
-       struct ixgbe_adapter *adapter = container_of(napi,
-                                       struct ixgbe_adapter, napi);
-       struct net_device *netdev = adapter->netdev;
+       struct ixgbe_q_vector *q_vector =
+                              container_of(napi, struct ixgbe_q_vector, napi);
+       struct ixgbe_adapter *adapter = q_vector->adapter;
+       struct ixgbe_ring *rx_ring = NULL;
        int work_done = 0;
-       struct ixgbe_ring *rxr = adapter->rx_ring;
+       long r_idx;
 
-       /* Keep link state information with original netdev */
-       if (!netif_carrier_ok(netdev))
-               goto quit_polling;
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       rx_ring = &(adapter->rx_ring[r_idx]);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+               ixgbe_update_rx_dca(adapter, rx_ring);
+#endif
 
-       ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
+       ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
 
-       /* If no Tx and not enough Rx work done, exit the polling mode */
-       if ((work_done < budget) || !netif_running(netdev)) {
-quit_polling:
-               netif_rx_complete(netdev, napi);
+       /* If all Rx work done, exit the polling mode */
+       if (work_done < budget) {
+               netif_rx_complete(adapter->netdev, napi);
+               if (adapter->itr_setting & 3)
+                       ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
-                                       rxr->eims_value);
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
        }
 
        return work_done;
 }
 
 /**
- * ixgbe_setup_msix - Initialize MSI-X interrupts
+ * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
  *
- * ixgbe_setup_msix allocates MSI-X vectors and requests
- * interrutps from the kernel.
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
  **/
-static int ixgbe_setup_msix(struct ixgbe_adapter *adapter)
+static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
 {
-       struct net_device *netdev = adapter->netdev;
-       int i, int_vector = 0, err = 0;
-       int max_msix_count;
-
-       /* +1 for the LSC interrupt */
-       max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1;
-       adapter->msix_entries = kcalloc(max_msix_count,
-                                       sizeof(struct msix_entry), GFP_KERNEL);
-       if (!adapter->msix_entries)
-               return -ENOMEM;
+       struct ixgbe_q_vector *q_vector =
+                              container_of(napi, struct ixgbe_q_vector, napi);
+       struct ixgbe_adapter *adapter = q_vector->adapter;
+       struct ixgbe_ring *rx_ring = NULL;
+       int work_done = 0, i;
+       long r_idx;
+       u16 enable_mask = 0;
+
+       /* attempt to distribute budget to each queue fairly, but don't allow
+        * the budget to go below 1 because we'll exit polling */
+       budget /= (q_vector->rxr_count ?: 1);
+       budget = max(budget, 1);
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       for (i = 0; i < q_vector->rxr_count; i++) {
+               rx_ring = &(adapter->rx_ring[r_idx]);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+                       ixgbe_update_rx_dca(adapter, rx_ring);
+#endif
+               ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
+               enable_mask |= rx_ring->v_idx;
+               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+                                     r_idx + 1);
+       }
 
-       for (i = 0; i < max_msix_count; i++)
-               adapter->msix_entries[i].entry = i;
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       rx_ring = &(adapter->rx_ring[r_idx]);
+       /* If all Rx work done, exit the polling mode */
+       if (work_done < budget) {
+               netif_rx_complete(adapter->netdev, napi);
+               if (adapter->itr_setting & 3)
+                       ixgbe_set_itr_msix(q_vector);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
+               return 0;
+       }
 
-       err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-                             max_msix_count);
-       if (err)
+       return work_done;
+}
+static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
+                                     int r_idx)
+{
+       a->q_vector[v_idx].adapter = a;
+       set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
+       a->q_vector[v_idx].rxr_count++;
+       a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
+                                     int r_idx)
+{
+       a->q_vector[v_idx].adapter = a;
+       set_bit(r_idx, a->q_vector[v_idx].txr_idx);
+       a->q_vector[v_idx].txr_count++;
+       a->tx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+/**
+ * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ * @vectors: allotted vector count for descriptor rings
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code.  Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible.  You would add new
+ * mapping configurations in here.
+ **/
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
+                                      int vectors)
+{
+       int v_start = 0;
+       int rxr_idx = 0, txr_idx = 0;
+       int rxr_remaining = adapter->num_rx_queues;
+       int txr_remaining = adapter->num_tx_queues;
+       int i, j;
+       int rqpv, tqpv;
+       int err = 0;
+
+       /* No mapping required if MSI-X is disabled. */
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                goto out;
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i);
-               err = request_irq(adapter->msix_entries[int_vector].vector,
-                                 &ixgbe_msix_clean_tx,
-                                 0,
-                                 adapter->tx_ring[i].name,
-                                 &(adapter->tx_ring[i]));
-               if (err) {
-                       DPRINTK(PROBE, ERR,
-                               "request_irq failed for MSIX interrupt "
-                               "Error: %d\n", err);
-                       goto release_irqs;
+       /*
+        * The ideal configuration...
+        * We have enough vectors to map one per queue.
+        */
+       if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+               for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+                       map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+               for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+                       map_vector_to_txq(adapter, v_start, txr_idx);
+
+               goto out;
+       }
+
+       /*
+        * If we don't have enough vectors for a 1-to-1
+        * mapping, we'll have to group them so there are
+        * multiple queues per vector.
+        */
+       /* Re-adjusting *qpv takes care of the remainder. */
+       for (i = v_start; i < vectors; i++) {
+               rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
+               for (j = 0; j < rqpv; j++) {
+                       map_vector_to_rxq(adapter, i, rxr_idx);
+                       rxr_idx++;
+                       rxr_remaining--;
+               }
+       }
+       for (i = v_start; i < vectors; i++) {
+               tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
+               for (j = 0; j < tqpv; j++) {
+                       map_vector_to_txq(adapter, i, txr_idx);
+                       txr_idx++;
+                       txr_remaining--;
                }
-               adapter->tx_ring[i].eims_value =
-                   (1 << IXGBE_MSIX_VECTOR(int_vector));
-               adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector);
-               int_vector++;
        }
 
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               if (strlen(netdev->name) < (IFNAMSIZ - 5))
-                       sprintf(adapter->rx_ring[i].name,
-                               "%s-rx%d", netdev->name, i);
-               else
-                       memcpy(adapter->rx_ring[i].name,
-                              netdev->name, IFNAMSIZ);
-               err = request_irq(adapter->msix_entries[int_vector].vector,
-                                 &ixgbe_msix_clean_rx, 0,
-                                 adapter->rx_ring[i].name,
-                                 &(adapter->rx_ring[i]));
+out:
+       return err;
+}
+
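
A worked example of the constrained-budget path: with 8 Rx and 8 Tx queues but only 4 vectors, each pass of the DIV_ROUND_UP loop hands the current vector a ceil(remaining / vectors_left) share, so every vector ends up with 2 Rx and 2 Tx queues and is serviced by ixgbe_msix_clean_many(). A standalone sketch of the same distribution:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int vectors = 4, rxr_remaining = 8, txr_remaining = 8;
            int i, rqpv, tqpv;

            /* mirror the grouping loops above */
            for (i = 0; i < vectors; i++) {
                    rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
                    tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
                    printf("vector %d: %d rx, %d tx\n", i, rqpv, tqpv);
                    rxr_remaining -= rqpv;
                    txr_remaining -= tqpv;
            }
            return 0;
    }
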
+/**
+ * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       irqreturn_t (*handler)(int, void *);
+       int i, vector, q_vectors, err;
+
+       /* Decrement for Other and TCP Timer vectors */
+       q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+       /* Map the Tx/Rx rings to the vectors we were allotted. */
+       err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+       if (err)
+               goto out;
+
+#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
+                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+                         &ixgbe_msix_clean_many)
+       for (vector = 0; vector < q_vectors; vector++) {
+               handler = SET_HANDLER(&adapter->q_vector[vector]);
+               sprintf(adapter->name[vector], "%s:v%d-%s",
+                       netdev->name, vector,
+                       (handler == &ixgbe_msix_clean_rx) ? "Rx" :
+                        ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
+               err = request_irq(adapter->msix_entries[vector].vector,
+                                 handler, 0, adapter->name[vector],
+                                 &(adapter->q_vector[vector]));
                if (err) {
                        DPRINTK(PROBE, ERR,
-                               "request_irq failed for MSIX interrupt "
-                               "Error: %d\n", err);
-                       goto release_irqs;
+                               "request_irq failed for MSIX interrupt "
+                               "Error: %d\n", err);
+                       goto free_queue_irqs;
                }
-
-               adapter->rx_ring[i].eims_value =
-                   (1 << IXGBE_MSIX_VECTOR(int_vector));
-               adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector);
-               int_vector++;
        }
 
-       sprintf(adapter->lsc_name, "%s-lsc", netdev->name);
-       err = request_irq(adapter->msix_entries[int_vector].vector,
-                         &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev);
+       sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+       err = request_irq(adapter->msix_entries[vector].vector,
+                         &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
        if (err) {
                DPRINTK(PROBE, ERR,
                        "request_irq for msix_lsc failed: %d\n", err);
-               goto release_irqs;
+               goto free_queue_irqs;
        }
 
-       /* FIXME: implement netif_napi_remove() instead */
-       adapter->napi.poll = ixgbe_clean_rxonly;
-       adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
        return 0;
 
-release_irqs:
-       int_vector--;
-       for (; int_vector >= adapter->num_tx_queues; int_vector--)
-               free_irq(adapter->msix_entries[int_vector].vector,
-                        &(adapter->rx_ring[int_vector -
-                                           adapter->num_tx_queues]));
-
-       for (; int_vector >= 0; int_vector--)
-               free_irq(adapter->msix_entries[int_vector].vector,
-                        &(adapter->tx_ring[int_vector]));
-out:
+free_queue_irqs:
+       for (i = vector - 1; i >= 0; i--)
+               free_irq(adapter->msix_entries[i].vector,
+                        &(adapter->q_vector[i]));
+       adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+       pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
-       adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+out:
        return err;
 }
 
+static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_q_vector *q_vector = adapter->q_vector;
+       u8 current_itr;
+       u32 new_itr = q_vector->eitr;
+       struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
+       struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+
+       q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
+                                           q_vector->tx_itr,
+                                           tx_ring->total_packets,
+                                           tx_ring->total_bytes);
+       q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
+                                           q_vector->rx_itr,
+                                           rx_ring->total_packets,
+                                           rx_ring->total_bytes);
+
+       current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+       switch (current_itr) {
+       /* the byte and packet counts in ixgbe_update_itr depend on these values */
+       case lowest_latency:
+               new_itr = 100000;
+               break;
+       case low_latency:
+               new_itr = 20000; /* aka hwitr = ~200 */
+               break;
+       case bulk_latency:
+               new_itr = 8000;
+               break;
+       default:
+               break;
+       }
+
+       if (new_itr != q_vector->eitr) {
+               u32 itr_reg;
+               /* do an exponential smoothing */
+               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+               q_vector->eitr = new_itr;
+               itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
+               /* must write high and low 16 bits to reset counter */
+               IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
+       }
+}
+
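The 90/10 blend above is an exponential moving average, so the programmed rate creeps toward the computed target instead of jumping. A minimal sketch with example values (illustration, not driver code):

/* Successive calls converge on the target interrupt rate:
 * from cur = 8000 toward 100000 -> 17200, 25480, 32932, ... */
static unsigned int smooth_itr(unsigned int cur, unsigned int target)
{
	return (cur * 90) / 100 + (target * 10) / 100;
}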
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);
+
 /**
- * ixgbe_intr - Interrupt Handler
+ * ixgbe_intr - legacy mode Interrupt Handler
  * @irq: interrupt number
  * @data: pointer to a network interface device structure
  * @pt_regs: CPU registers structure
@@ -702,27 +1302,45 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;
 
+       /* for NAPI, we use EIAM to auto-mask the tx/rx interrupt bits on
+        * read, therefore no explicit interrupt disable is necessary */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-
-       if (!eicr)
+       if (!eicr) {
+               /* shared interrupt alert!
+                * make sure interrupts are enabled because the read will
+                * have disabled interrupts due to EIAM */
+               ixgbe_irq_enable(adapter);
                return IRQ_NONE;        /* Not our interrupt */
-
-       if (eicr & IXGBE_EICR_LSC) {
-               adapter->lsc_int++;
-               if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       mod_timer(&adapter->watchdog_timer, jiffies);
        }
-       if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
-               /* Disable interrupts and register for poll. The flush of the
-                * posted write is intentionally left out. */
-               atomic_inc(&adapter->irq_sem);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-               __netif_rx_schedule(netdev, &adapter->napi);
+
+       if (eicr & IXGBE_EICR_LSC)
+               ixgbe_check_lsc(adapter);
+
+       if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
+               adapter->tx_ring[0].total_packets = 0;
+               adapter->tx_ring[0].total_bytes = 0;
+               adapter->rx_ring[0].total_packets = 0;
+               adapter->rx_ring[0].total_bytes = 0;
+               /* would disable interrupts here but EIAM disabled it */
+               __netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
        }
 
        return IRQ_HANDLED;
 }
 
+static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
+{
+       int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+       for (i = 0; i < q_vectors; i++) {
+               struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+               bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+               bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+               q_vector->rxr_count = 0;
+               q_vector->txr_count = 0;
+       }
+}
+
 /**
  * ixgbe_request_irq - initialize interrupts
  * @adapter: board private structure
@@ -730,40 +1348,24 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
  * Attempts to configure interrupts using the best available
  * capabilities of the hardware and kernel.
  **/
-static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues)
+static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       int flags, err;
-       irq_handler_t handler = ixgbe_intr;
+       int err;
 
-       flags = IRQF_SHARED;
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               err = ixgbe_request_msix_irqs(adapter);
+       } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+               err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
+                                 netdev->name, netdev);
+       } else {
+               err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
+                                 netdev->name, netdev);
+       }
 
-       err = ixgbe_setup_msix(adapter);
-       if (!err)
-               goto request_done;
-
-       /*
-        * if we can't do MSI-X, fall through and try MSI
-        * No need to reallocate memory since we're decreasing the number of
-        * queues. We just won't use the other ones, also it is freed correctly
-        * on ixgbe_remove.
-        */
-       *num_rx_queues = 1;
-
-       /* do MSI */
-       err = pci_enable_msi(adapter->pdev);
-       if (!err) {
-               adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
-               flags &= ~IRQF_SHARED;
-               handler = &ixgbe_intr;
-       }
-
-       err = request_irq(adapter->pdev->irq, handler, flags,
-                         netdev->name, netdev);
        if (err)
                DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
 
-request_done:
        return err;
 }
 
@@ -772,28 +1374,22 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
 
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               int i;
+               int i, q_vectors;
 
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       free_irq(adapter->msix_entries[i].vector,
-                                &(adapter->tx_ring[i]));
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       free_irq(adapter->msix_entries[i +
-                                               adapter->num_tx_queues].vector,
-                               &(adapter->rx_ring[i]));
-               i = adapter->num_rx_queues + adapter->num_tx_queues;
+               q_vectors = adapter->num_msix_vectors;
+
+               i = q_vectors - 1;
                free_irq(adapter->msix_entries[i].vector, netdev);
-               pci_disable_msix(adapter->pdev);
-               kfree(adapter->msix_entries);
-               adapter->msix_entries = NULL;
-               adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
-               return;
-       }
 
-       free_irq(adapter->pdev->irq, netdev);
-       if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
-               pci_disable_msi(adapter->pdev);
-               adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+               i--;
+               for (; i >= 0; i--) {
+                       free_irq(adapter->msix_entries[i].vector,
+                                &(adapter->q_vector[i]));
+               }
+
+               ixgbe_reset_q_vectors(adapter);
+       } else {
+               free_irq(adapter->pdev->irq, netdev);
        }
 }
 
@@ -803,10 +1399,15 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-       atomic_inc(&adapter->irq_sem);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
-       synchronize_irq(adapter->pdev->irq);
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               int i;
+               for (i = 0; i < adapter->num_msix_vectors; i++)
+                       synchronize_irq(adapter->msix_entries[i].vector);
+       } else {
+               synchronize_irq(adapter->pdev->irq);
+       }
 }
 
 /**
@@ -815,15 +1416,10 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 {
-       if (atomic_dec_and_test(&adapter->irq_sem)) {
-               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
-                                       (IXGBE_EIMS_ENABLE_MASK &
-                                        ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
-                               IXGBE_EIMS_ENABLE_MASK);
-               IXGBE_WRITE_FLUSH(&adapter->hw);
-       }
+       u32 mask;
+       mask = IXGBE_EIMS_ENABLE_MASK;
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+       IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
 /**
@@ -832,57 +1428,142 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 {
-       int i;
        struct ixgbe_hw *hw = &adapter->hw;
 
-       if (adapter->rx_eitr)
-               IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
-                               EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
-
-       /* for re-triggering the interrupt in non-NAPI mode */
-       adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
-       adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
+       IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
+                       EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
 
        ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i);
+       ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
+
+       map_vector_to_rxq(adapter, 0, 0);
+       map_vector_to_txq(adapter, 0, 0);
+
+       DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
 }
 
 /**
- * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset
+ * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
  * @adapter: board private structure
  *
  * Configure the Tx unit of the MAC after a reset.
  **/
 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 {
-       u64 tdba;
+       u64 tdba, tdwba;
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 i, tdlen;
+       u32 i, j, tdlen, txctrl;
 
        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               tdba = adapter->tx_ring[i].dma;
-               tdlen = adapter->tx_ring[i].count *
-                   sizeof(union ixgbe_adv_tx_desc);
-               IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK));
-               IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
-               IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen);
-               IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
-               adapter->tx_ring[i].head = IXGBE_TDH(i);
-               adapter->tx_ring[i].tail = IXGBE_TDT(i);
+               struct ixgbe_ring *ring = &adapter->tx_ring[i];
+               j = ring->reg_idx;
+               tdba = ring->dma;
+               tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
+               IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
+                               (tdba & DMA_32BIT_MASK));
+               IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
+               tdwba = ring->dma +
+                       (ring->count * sizeof(union ixgbe_adv_tx_desc));
+               tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
+               IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
+               IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
+               adapter->tx_ring[i].head = IXGBE_TDH(j);
+               adapter->tx_ring[i].tail = IXGBE_TDT(j);
+               /* Disable Tx Head Writeback RO bit, since this hoses
+                * bookkeeping if things aren't delivered in order.
+                */
+               txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+       }
+}
+
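The TDWBAL/TDWBAH programming above enables head write-back: the NIC DMA-writes the consumed head index to memory just past the descriptor ring, so Tx cleanup can poll ordinary cacheable memory instead of issuing an MMIO read of TDH each time. A hedged sketch of the resulting check (hypothetical types, not the driver's):

struct tx_ring_sketch {
	volatile unsigned int *head_wb;	/* DMA target set via TDWBAL/H */
	unsigned int next_to_clean;
};

/* the completion test is now a memory read, not a register read */
static inline int tx_work_pending(const struct tx_ring_sketch *r)
{
	return *r->head_wb != r->next_to_clean;
}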
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT        2
+
+static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
+{
+       struct ixgbe_ring *rx_ring;
+       u32 srrctl;
+       int queue0;
+       unsigned long mask;
+
+       /* program one srrctl register per VMDq index */
+       if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+               long shift, len;
+               mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
+               len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
+               shift = find_first_bit(&mask, len);
+               queue0 = index & mask;
+               index = (index & mask) >> shift;
+       /* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
+       } else {
+               mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
+               queue0 = index & mask;
+               index = index & mask;
+       }
+
+       rx_ring = &adapter->rx_ring[queue0];
+
+       srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
+
+       srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+       srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+
+       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+               srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+               srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+               srrctl |= ((IXGBE_RX_HDR_SIZE <<
+                           IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+                          IXGBE_SRRCTL_BSIZEHDR_MASK);
+       } else {
+               srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+               if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+                       srrctl |= IXGBE_RXBUFFER_2048 >>
+                                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+               else
+                       srrctl |= rx_ring->rx_buf_len >>
+                                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        }
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
+}
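The VMDq branch above derives its shift from the mask at run time with find_first_bit() and then right-aligns the masked bits. A minimal userspace analogue of that extract-a-field idiom, with ffs() standing in for find_first_bit():

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Extract the bit-field selected by 'mask', right-aligned; the
 * shift comes from the mask itself instead of being hard-coded. */
static unsigned int field_get(unsigned int value, unsigned int mask)
{
	return (value & mask) >> (ffs((int)mask) - 1);
}

int main(void)
{
	printf("%u\n", field_get(0x2c, 0x38));	/* bits [5:3] -> 5 */
	return 0;
}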
 
-       IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT);
+/**
+ * ixgbe_get_skb_hdr - helper function for LRO header processing
+ * @skb: pointer to sk_buff to be added to LRO packet
+ * @iphdr: pointer to ip header structure
+ * @tcph: pointer to tcp header structure
+ * @hdr_flags: pointer to header flags
+ * @priv: private data
+ **/
+static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
+                             u64 *hdr_flags, void *priv)
+{
+       union ixgbe_adv_rx_desc *rx_desc = priv;
+
+       /* Verify that this is a valid IPv4 TCP packet */
+       if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
+            (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
+               return -1;
+
+       /* Set network headers */
+       skb_reset_network_header(skb);
+       skb_set_transport_header(skb, ip_hdrlen(skb));
+       *iphdr = ip_hdr(skb);
+       *tcph = tcp_hdr(skb);
+       *hdr_flags = LRO_IPV4 | LRO_TCP;
+       return 0;
 }
 
 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
-                       (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+                           (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
 
-#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT                        2
 /**
- * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset
+ * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
  * @adapter: board private structure
  *
  * Configure the Rx unit of the MAC after a reset.
@@ -893,32 +1574,33 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+       int i, j;
        u32 rdlen, rxctrl, rxcsum;
-       u32 random[10];
-       u32 reta, mrqc;
-       int i;
+       static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
+                         0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
+                         0x6A3E67EA, 0x14364D17, 0x3BED200D};
        u32 fctrl, hlreg0;
-       u32 srrctl;
        u32 pages;
+       u32 reta = 0, mrqc;
+       u32 rdrxctl;
+       int rx_buf_len;
 
        /* Decide whether to use packet split mode or not */
-       if (netdev->mtu > ETH_DATA_LEN)
-               adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-       else
-               adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+       adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-               adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
+               rx_buf_len = IXGBE_RX_HDR_SIZE;
        } else {
                if (netdev->mtu <= ETH_DATA_LEN)
-                       adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+                       rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                else
-                       adapter->rx_buf_len = ALIGN(max_frame, 1024);
+                       rx_buf_len = ALIGN(max_frame, 1024);
        }
 
        fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_BAM;
+       fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
 
        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
@@ -930,28 +1612,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 
        pages = PAGE_USE_COUNT(adapter->netdev->mtu);
 
-       srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
-       srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
-       srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
-
-       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-               srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-               srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-               srrctl |= ((IXGBE_RX_HDR_SIZE <<
-                           IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-                          IXGBE_SRRCTL_BSIZEHDR_MASK);
-       } else {
-               srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
-               if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
-                       srrctl |=
-                            IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-               else
-                       srrctl |=
-                            adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-       }
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
-
        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* disable receives while setting up the descriptors */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -961,119 +1621,137 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
         * the Base and Length of the Rx Descriptor Ring */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rdba = adapter->rx_ring[i].dma;
-               IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
-               IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
-               IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
-               IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
-               adapter->rx_ring[i].head = IXGBE_RDH(i);
-               adapter->rx_ring[i].tail = IXGBE_RDT(i);
-       }
-
-       if (adapter->num_rx_queues > 1) {
-               /* Random 40bytes used as random key in RSS hash function */
-               get_random_bytes(&random[0], 40);
-
-               switch (adapter->num_rx_queues) {
-               case 8:
-               case 4:
-                       /* Bits [3:0] in each byte refers the Rx queue no */
-                       reta = 0x00010203;
-                       break;
-               case 2:
-                       reta = 0x00010001;
-                       break;
-               default:
-                       reta = 0x00000000;
-                       break;
-               }
+               j = adapter->rx_ring[i].reg_idx;
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
+               IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
+               adapter->rx_ring[i].head = IXGBE_RDH(j);
+               adapter->rx_ring[i].tail = IXGBE_RDT(j);
+               adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+               /* Initial LRO settings */
+               adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
+               adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
+               adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
+               adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
+               if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+                       adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
+               adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
+               adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+               adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+
+               ixgbe_configure_srrctl(adapter, j);
+       }
+
+       /*
+        * For VMDq support of different descriptor types or
+        * buffer sizes through the use of multiple SRRCTL
+        * registers, RDRXCTL.MVMEN must be set to 1
+        *
+        * Also, the manual doesn't mention it clearly, but DCA hints
+        * will only use queue 0's tags unless this bit is set; the only
+        * side effect of setting it is that SRRCTL must be fully
+        * programmed [0..15].
+        */
+       rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+       rdrxctl |= IXGBE_RDRXCTL_MVMEN;
+       IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
 
+
+       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                /* Fill out redirection table */
-               for (i = 0; i < 32; i++) {
-                       IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta);
-                       if (adapter->num_rx_queues > 4) {
-                               i++;
-                               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i,
-                                                     0x04050607);
-                       }
+               for (i = 0, j = 0; i < 128; i++, j++) {
+                       if (j == adapter->ring_feature[RING_F_RSS].indices)
+                               j = 0;
+                       /* reta is a sliding window of 4 one-byte entries,
+                        * each byte holding queue index j replicated in
+                        * both nibbles (j * 0x11), cycling 0..indices-1 */
+                       reta = (reta << 8) | (j * 0x11);
+                       if ((i & 3) == 3)
+                               IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
                }
 
                /* Fill out hash function seeds */
                for (i = 0; i < 10; i++)
-                       IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]);
+                       IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
 
                mrqc = IXGBE_MRQC_RSSEN
                    /* Perform hash on these packet types */
-                   | IXGBE_MRQC_RSS_FIELD_IPV4
-                   | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
-                   | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
-                   | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
-                   | IXGBE_MRQC_RSS_FIELD_IPV6_EX
-                   | IXGBE_MRQC_RSS_FIELD_IPV6
-                   | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
-                   | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
-                   | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+                      | IXGBE_MRQC_RSS_FIELD_IPV4
+                      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+                      | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
+                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+                      | IXGBE_MRQC_RSS_FIELD_IPV6
+                      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+                      | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
+                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
                IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+       }
+
+       rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 
-               /* Multiqueue and packet checksumming are mutually exclusive. */
-               rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
+           adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
+               /* Disable checksum reporting in the descriptor; this
+                * also enables reporting the RSS hash */
                rxcsum |= IXGBE_RXCSUM_PCSD;
-               IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
-       } else {
-               /* Enable Receive Checksum Offload for TCP and UDP */
-               rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
-               if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
-                       /* Enable IPv4 payload checksum for UDP fragments
-                        * Must be used in conjunction with packet-split. */
-                       rxcsum |= IXGBE_RXCSUM_IPPCSE;
-               } else {
-                       /* don't need to clear IPPCSE as it defaults to 0 */
-               }
-               IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
        }
-       /* Enable Receives */
-       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
-       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
+               /* Enable IPv4 payload checksum for UDP fragments
+                * if PCSD is not set */
+               rxcsum |= IXGBE_RXCSUM_IPPCSE;
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 }
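The redirection-table fill above packs four one-byte entries per 32-bit RETA register. A runnable reproduction of just that packing, assuming a 4-queue RSS configuration:

#include <stdio.h>

int main(void)
{
	unsigned int reta = 0, indices = 4;
	int i, j;

	/* same loop shape as the RETA fill in ixgbe_configure_rx() */
	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == indices)
			j = 0;
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3 && i < 8)
			printf("RETA[%d] = 0x%08x\n", i >> 2, reta);
	}
	return 0;	/* with indices = 4, every register is 0x00112233 */
}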
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
-                                  struct vlan_group *grp)
+                                   struct vlan_group *grp)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        u32 ctrl;
 
-       ixgbe_irq_disable(adapter);
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_disable(adapter);
        adapter->vlgrp = grp;
 
        if (grp) {
                /* enable VLAN tag insert/strip */
                ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-               ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
+               ctrl |= IXGBE_VLNCTRL_VME;
                ctrl &= ~IXGBE_VLNCTRL_CFIEN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
        }
 
-       ixgbe_irq_enable(adapter);
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_enable(adapter);
 }
 
 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
 
        /* add VID to filter table */
-       ixgbe_set_vfta(&adapter->hw, vid, 0, true);
+       hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
 }
 
 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_disable(adapter);
 
-       ixgbe_irq_disable(adapter);
        vlan_group_set_device(adapter->vlgrp, vid, NULL);
-       ixgbe_irq_enable(adapter);
+
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_enable(adapter);
 
        /* remove VID from filter table */
-       ixgbe_set_vfta(&adapter->hw, vid, 0, false);
+       hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
 }
 
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
@@ -1090,61 +1768,116 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
        }
 }
 
+static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
+{
+       struct dev_mc_list *mc_ptr;
+       u8 *addr = *mc_addr_ptr;
+       *vmdq = 0;
+
+       mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+       if (mc_ptr->next)
+               *mc_addr_ptr = mc_ptr->next->dmi_addr;
+       else
+               *mc_addr_ptr = NULL;
+
+       return addr;
+}
+
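ixgbe_addr_list_itr recovers each dev_mc_list node from a bare pointer to its embedded dmi_addr[] via container_of, then advances the cursor. A self-contained userspace sketch of the same iterator pattern (struct node is a hypothetical stand-in):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node {				/* stand-in for dev_mc_list */
	struct node *next;
	unsigned char addr[6];
};

static unsigned char *addr_itr(unsigned char **cursor)
{
	unsigned char *cur = *cursor;
	struct node *n = container_of(cur, struct node, addr[0]);

	*cursor = n->next ? n->next->addr : NULL;
	return cur;
}

int main(void)
{
	struct node b = { NULL, { 2 } }, a = { &b, { 1 } };
	unsigned char *cursor = a.addr;

	while (cursor)
		printf("%u\n", addr_itr(&cursor)[0]);	/* prints 1, 2 */
	return 0;
}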
 /**
- * ixgbe_set_multi - Multicast and Promiscuous mode set
+ * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
  * @netdev: network interface device structure
  *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
+ * The set_rx_mode entry point is called whenever the unicast/multicast
+ * address list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast and
+ * promiscuous mode.
  **/
-static void ixgbe_set_multi(struct net_device *netdev)
+static void ixgbe_set_rx_mode(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       struct dev_mc_list *mc_ptr;
-       u8 *mta_list;
-       u32 fctrl;
-       int i;
+       u32 fctrl, vlnctrl;
+       u8 *addr_list = NULL;
+       int addr_count = 0;
 
        /* Check for Promiscuous and All Multicast modes */
 
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 
        if (netdev->flags & IFF_PROMISC) {
+               hw->addr_ctrl.user_set_promisc = 1;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-       } else if (netdev->flags & IFF_ALLMULTI) {
-               fctrl |= IXGBE_FCTRL_MPE;
-               fctrl &= ~IXGBE_FCTRL_UPE;
+               vlnctrl &= ~IXGBE_VLNCTRL_VFE;
        } else {
-               fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+               if (netdev->flags & IFF_ALLMULTI) {
+                       fctrl |= IXGBE_FCTRL_MPE;
+                       fctrl &= ~IXGBE_FCTRL_UPE;
+               } else {
+                       fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+               }
+               vlnctrl |= IXGBE_VLNCTRL_VFE;
+               hw->addr_ctrl.user_set_promisc = 0;
        }
 
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
+       /* reprogram secondary unicast list */
+       addr_count = netdev->uc_count;
+       if (addr_count)
+               addr_list = netdev->uc_list->dmi_addr;
+       hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
+                                         ixgbe_addr_list_itr);
+
+       /* reprogram multicast list */
+       addr_count = netdev->mc_count;
+       if (addr_count)
+               addr_list = netdev->mc_list->dmi_addr;
+       hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+                                       ixgbe_addr_list_itr);
+}
 
-       if (netdev->mc_count) {
-               mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
-               if (!mta_list)
-                       return;
+static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
+{
+       int q_idx;
+       struct ixgbe_q_vector *q_vector;
+       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+       /* legacy and MSI only use one vector */
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+               q_vectors = 1;
+
+       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+               struct napi_struct *napi;
+               q_vector = &adapter->q_vector[q_idx];
+               if (!q_vector->rxr_count)
+                       continue;
+               napi = &q_vector->napi;
+               if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
+                   (q_vector->rxr_count > 1))
+                       napi->poll = &ixgbe_clean_rxonly_many;
 
-               /* Shared function expects packed array of only addresses. */
-               mc_ptr = netdev->mc_list;
+               napi_enable(napi);
+       }
+}
 
-               for (i = 0; i < netdev->mc_count; i++) {
-                       if (!mc_ptr)
-                               break;
-                       memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
-                              ETH_ALEN);
-                       mc_ptr = mc_ptr->next;
-               }
+static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
+{
+       int q_idx;
+       struct ixgbe_q_vector *q_vector;
+       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-               ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
-               kfree(mta_list);
-       } else {
-               ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
-       }
+       /* legacy and MSI only use one vector */
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+               q_vectors = 1;
 
+       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+               q_vector = &adapter->q_vector[q_idx];
+               if (!q_vector->rxr_count)
+                       continue;
+               napi_disable(&q_vector->napi);
+       }
 }
 
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
@@ -1152,7 +1885,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        int i;
 
-       ixgbe_set_multi(netdev);
+       ixgbe_set_rx_mode(netdev);
 
        ixgbe_restore_vlan(adapter);
 
@@ -1160,34 +1893,41 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
        ixgbe_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
-                                          (adapter->rx_ring[i].count - 1));
+                                      (adapter->rx_ring[i].count - 1));
 }
 
 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       int i;
-       u32 gpie = 0;
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 txdctl, rxdctl, mhadd;
+       int i, j = 0;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+       u32 txdctl, rxdctl, mhadd;
+       u32 gpie;
+
+       ixgbe_get_hw_control(adapter);
 
-       if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
-                             IXGBE_FLAG_MSI_ENABLED)) {
+       if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
+           (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
                if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                        gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
-                               IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
+                               IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
                } else {
                        /* MSI only */
-                       gpie = (IXGBE_GPIE_EIAME |
-                               IXGBE_GPIE_PBA_SUPPORT);
+                       gpie = 0;
                }
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
-               gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
+               /* XXX: to interrupt immediately for EICS writes, enable this */
+               /* gpie |= IXGBE_GPIE_EIMEN; */
+               IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
        }
 
-       mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+               /* legacy interrupts, use EIAM to auto-mask when reading EICR,
+                * specifically only auto-masking the tx and rx interrupts */
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+       }
 
+       mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
                mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
@@ -1196,15 +1936,23 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        }
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
+               j = adapter->tx_ring[i].reg_idx;
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+               /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+               txdctl |= (8 << 16);
                txdctl |= IXGBE_TXDCTL_ENABLE;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
        }
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
+               j = adapter->rx_ring[i].reg_idx;
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+               /* enable PTHRESH=32 descriptors (half the internal cache)
+                * and HTHRESH=0 descriptors (to minimize latency on fetch);
+                * this also avoids a pesky rx_no_buffer_count increment */
+               rxdctl |= 0x0020;
                rxdctl |= IXGBE_RXDCTL_ENABLE;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
+               IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
        }
        /* enable all receives */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -1217,15 +1965,31 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                ixgbe_configure_msi_and_legacy(adapter);
 
        clear_bit(__IXGBE_DOWN, &adapter->state);
-       napi_enable(&adapter->napi);
+       ixgbe_napi_enable_all(adapter);
+
+       /* clear any pending interrupts, may auto mask */
+       IXGBE_READ_REG(hw, IXGBE_EICR);
+
        ixgbe_irq_enable(adapter);
 
        /* bring the link up in the watchdog, this could race with our first
         * link up interrupt but shouldn't be a problem */
+       adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+       adapter->link_check_timeout = jiffies;
        mod_timer(&adapter->watchdog_timer, jiffies);
        return 0;
 }
 
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
+{
+       WARN_ON(in_interrupt());
+       while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+               msleep(1);
+       ixgbe_down(adapter);
+       ixgbe_up(adapter);
+       clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+
 int ixgbe_up(struct ixgbe_adapter *adapter)
 {
        /* hardware has been reset, we need to reload some things */
@@ -1236,50 +2000,14 @@ int ixgbe_up(struct ixgbe_adapter *adapter)
 
 void ixgbe_reset(struct ixgbe_adapter *adapter)
 {
-       if (ixgbe_init_hw(&adapter->hw))
-               DPRINTK(PROBE, ERR, "Hardware Error\n");
+       struct ixgbe_hw *hw = &adapter->hw;
+       if (hw->mac.ops.init_hw(hw))
+               dev_err(&adapter->pdev->dev, "Hardware Error\n");
 
        /* reprogram the RAR[0] in case user changed it. */
-       ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
-
-}
-
-#ifdef CONFIG_PM
-static int ixgbe_resume(struct pci_dev *pdev)
-{
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       u32 err, num_rx_queues = adapter->num_rx_queues;
-
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       err = pci_enable_device(pdev);
-       if (err) {
-               printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
-                               "suspend\n");
-               return err;
-       }
-       pci_set_master(pdev);
-
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_enable_wake(pdev, PCI_D3cold, 0);
-
-       if (netif_running(netdev)) {
-               err = ixgbe_request_irq(adapter, &num_rx_queues);
-               if (err)
-                       return err;
-       }
-
-       ixgbe_reset(adapter);
-
-       if (netif_running(netdev))
-               ixgbe_up(adapter);
-
-       netif_device_attach(netdev);
+       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 
-       return 0;
 }
-#endif
 
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
@@ -1287,7 +2015,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
  * @rx_ring: ring to free buffers from
  **/
 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *rx_ring)
+                                struct ixgbe_ring *rx_ring)
 {
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
@@ -1301,8 +2029,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                if (rx_buffer_info->dma) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
-                                        adapter->rx_buf_len,
-                                        PCI_DMA_FROMDEVICE);
+                                        rx_ring->rx_buf_len,
+                                        PCI_DMA_FROMDEVICE);
                        rx_buffer_info->dma = 0;
                }
                if (rx_buffer_info->skb) {
@@ -1311,12 +2039,12 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                }
                if (!rx_buffer_info->page)
                        continue;
-               pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
-                              PCI_DMA_FROMDEVICE);
+               pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
+                              PCI_DMA_FROMDEVICE);
                rx_buffer_info->page_dma = 0;
-
                put_page(rx_buffer_info->page);
                rx_buffer_info->page = NULL;
+               rx_buffer_info->page_offset = 0;
        }
 
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
@@ -1338,7 +2066,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
  * @tx_ring: ring to be cleaned
  **/
 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *tx_ring)
+                                struct ixgbe_ring *tx_ring)
 {
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned long size;
@@ -1365,127 +2093,127 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
 }
 
 /**
- * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
  * @adapter: board private structure
  **/
-static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
+static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
 {
        int i;
 
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /**
- * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
  * @adapter: board private structure
  **/
-static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
+static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 {
        int i;
 
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl;
+       u32 txdctl;
+       int i, j;
 
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBE_DOWN, &adapter->state);
 
        /* disable receives */
-       rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
-                       rxctrl & ~IXGBE_RXCTRL_RXEN);
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
        netif_tx_disable(netdev);
 
-       /* disable transmits in the hardware */
-
-       /* flush both disables */
-       IXGBE_WRITE_FLUSH(&adapter->hw);
+       IXGBE_WRITE_FLUSH(hw);
        msleep(10);
 
-       napi_disable(&adapter->napi);
-       atomic_set(&adapter->irq_sem, 0);
+       netif_tx_stop_all_queues(netdev);
 
        ixgbe_irq_disable(adapter);
 
+       ixgbe_napi_disable_all(adapter);
+
        del_timer_sync(&adapter->watchdog_timer);
+       cancel_work_sync(&adapter->watchdog_task);
 
-       netif_carrier_off(netdev);
-       netif_stop_queue(netdev);
+       /* disable transmits in the hardware now that interrupts are off */
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               j = adapter->tx_ring[i].reg_idx;
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+                               (txdctl & ~IXGBE_TXDCTL_ENABLE));
+       }
 
-       ixgbe_reset(adapter);
-       ixgbe_clean_all_tx_rings(adapter);
-       ixgbe_clean_all_rx_rings(adapter);
+       netif_carrier_off(netdev);
 
-}
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+               adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+               dca_remove_requester(&adapter->pdev->dev);
+       }
 
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
-       int retval = 0;
 #endif
+       if (!pci_channel_offline(adapter->pdev))
+               ixgbe_reset(adapter);
+       ixgbe_clean_all_tx_rings(adapter);
+       ixgbe_clean_all_rx_rings(adapter);
 
-       netif_device_detach(netdev);
-
-       if (netif_running(netdev)) {
-               ixgbe_down(adapter);
-               ixgbe_free_irq(adapter);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       /* since we reset the hardware, DCA settings were cleared */
+       if (dca_add_requester(&adapter->pdev->dev) == 0) {
+               adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+               /* always use CB2 mode, difference is masked
+                * in the CB driver */
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
+               ixgbe_setup_dca(adapter);
        }
-
-#ifdef CONFIG_PM
-       retval = pci_save_state(pdev);
-       if (retval)
-               return retval;
 #endif
-
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_enable_wake(pdev, PCI_D3cold, 0);
-
-       pci_disable_device(pdev);
-
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
-       return 0;
-}
-
-static void ixgbe_shutdown(struct pci_dev *pdev)
-{
-       ixgbe_suspend(pdev, PMSG_SUSPEND);
 }
 
 /**
- * ixgbe_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * ixgbe_poll - NAPI Rx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
+ *
+ * This function is used for legacy and MSI interrupts, in NAPI mode
  **/
-static int ixgbe_clean(struct napi_struct *napi, int budget)
+static int ixgbe_poll(struct napi_struct *napi, int budget)
 {
-       struct ixgbe_adapter *adapter = container_of(napi,
-                                       struct ixgbe_adapter, napi);
-       struct net_device *netdev = adapter->netdev;
-       int tx_cleaned = 0, work_done = 0;
+       struct ixgbe_q_vector *q_vector = container_of(napi,
+                                                 struct ixgbe_q_vector, napi);
+       struct ixgbe_adapter *adapter = q_vector->adapter;
+       int tx_cleaned, work_done = 0;
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+               ixgbe_update_tx_dca(adapter, adapter->tx_ring);
+               ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+       }
+#endif
 
-       /* In non-MSIX case, there is no multi-Tx/Rx queue */
        tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
-       ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done,
-                          budget);
+       ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
 
        if (tx_cleaned)
                work_done = budget;
 
        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
-               netif_rx_complete(netdev, napi);
-               ixgbe_irq_enable(adapter);
+               netif_rx_complete(adapter->netdev, napi);
+               if (adapter->itr_setting & 3)
+                       ixgbe_set_itr(adapter);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       ixgbe_irq_enable(adapter);
        }
-
        return work_done;
 }
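ixgbe_poll follows the usual NAPI contract: consume at most budget packets, and only when the ring drains early (work_done < budget) complete polling and re-arm interrupts. A runnable toy model of that contract (all names invented for illustration):

#include <stdio.h>

static int poll_once(int backlog, int budget, int *irq_enabled)
{
	int work_done = backlog < budget ? backlog : budget;

	if (work_done < budget)
		*irq_enabled = 1;	/* netif_rx_complete + irq enable */
	return work_done;		/* == budget keeps us in polling */
}

int main(void)
{
	int irq_on = 0, work;

	work = poll_once(100, 64, &irq_on);
	printf("work=%d irq_on=%d\n", work, irq_on);	/* 64, 0 */
	work = poll_once(10, 64, &irq_on);
	printf("work=%d irq_on=%d\n", work, irq_on);	/* 10, 1 */
	return 0;
}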
 
@@ -1508,8 +2236,129 @@ static void ixgbe_reset_task(struct work_struct *work)
 
        adapter->tx_timeout_count++;
 
-       ixgbe_down(adapter);
-       ixgbe_up(adapter);
+       ixgbe_reinit_locked(adapter);
+}
+
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+{
+       int nrq = 1, ntq = 1;
+       int feature_mask = 0, rss_i, rss_m;
+
+       /* Number of supported queues */
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               rss_i = adapter->ring_feature[RING_F_RSS].indices;
+               rss_m = 0;
+               feature_mask |= IXGBE_FLAG_RSS_ENABLED;
+
+               switch (adapter->flags & feature_mask) {
+               case (IXGBE_FLAG_RSS_ENABLED):
+                       rss_m = 0xF;
+                       nrq = rss_i;
+                       ntq = rss_i;
+                       break;
+               case 0:
+               default:
+                       rss_i = 0;
+                       rss_m = 0;
+                       nrq = 1;
+                       ntq = 1;
+                       break;
+               }
+
+               adapter->ring_feature[RING_F_RSS].indices = rss_i;
+               adapter->ring_feature[RING_F_RSS].mask = rss_m;
+               break;
+       default:
+               nrq = 1;
+               ntq = 1;
+               break;
+       }
+
+       adapter->num_rx_queues = nrq;
+       adapter->num_tx_queues = ntq;
+}
+
+static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
+                                       int vectors)
+{
+       int err, vector_threshold;
+
+       /* We'll want at least 3 (vector_threshold):
+        * 1) TxQ[0] Cleanup
+        * 2) RxQ[0] Cleanup
+        * 3) Other (Link Status Change, etc.)
+        * 4) TCP Timer (optional)
+        */
+       vector_threshold = MIN_MSIX_COUNT;
+
+       /* The more we get, the more we will assign to Tx/Rx Cleanup
+        * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+        * Right now, we simply care about how many we'll get; we'll
+        * set them up later while requesting irq's.
+        */
+       while (vectors >= vector_threshold) {
+               err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+                                     vectors);
+               if (!err) /* Success in acquiring all requested vectors. */
+                       break;
+               else if (err < 0)
+                       vectors = 0; /* Nasty failure, quit now */
+               else /* err == number of vectors we should try again with */
+                       vectors = err;
+       }
+
+       if (vectors < vector_threshold) {
+               /* Can't allocate enough MSI-X interrupts?  Oh well.
+                * This just means we'll go with either a single MSI
+                * vector or fall back to legacy interrupts.
+                */
+               DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
+               adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+               kfree(adapter->msix_entries);
+               adapter->msix_entries = NULL;
+               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+               ixgbe_set_num_queues(adapter);
+       } else {
+               adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
+               adapter->num_msix_vectors = vectors;
+       }
+}
+
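pci_enable_msix() in this kernel generation returns 0 on success, a negative errno on hard failure, or a positive count of vectors the platform can actually provide, which the loop above feeds back in as the new request. A userspace mock of that negotiation (fake_enable_msix is invented to stand in for the real call):

#include <stdio.h>

static int fake_enable_msix(int requested, int available)
{
	return requested <= available ? 0 : available;
}

int main(void)
{
	int want = 18, min_needed = 3, available = 10;

	while (want >= min_needed) {
		int ret = fake_enable_msix(want, available);

		if (ret == 0)
			break;		/* got everything we asked for */
		if (ret < 0) {
			want = 0;	/* hard failure, give up */
			break;
		}
		want = ret;		/* retry with what's on offer */
	}
	printf("acquired %d vectors\n", want);	/* prints 10 */
	return 0;
}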
+/**
+ * ixgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset each descriptor ring is assigned to.
+ **/
+static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+{
+       int feature_mask = 0, rss_i;
+       int i, txr_idx, rxr_idx;
+
+       /* Number of supported queues */
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               rss_i = adapter->ring_feature[RING_F_RSS].indices;
+               txr_idx = 0;
+               rxr_idx = 0;
+               feature_mask |= IXGBE_FLAG_RSS_ENABLED;
+               switch (adapter->flags & feature_mask) {
+               case (IXGBE_FLAG_RSS_ENABLED):
+                       for (i = 0; i < adapter->num_rx_queues; i++)
+                               adapter->rx_ring[i].reg_idx = i;
+                       for (i = 0; i < adapter->num_tx_queues; i++)
+                               adapter->tx_ring[i].reg_idx = i;
+                       break;
+               case 0:
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
 }
 
 /**
@@ -1525,27 +2374,168 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
        int i;
 
        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-                                  sizeof(struct ixgbe_ring), GFP_KERNEL);
+                                  sizeof(struct ixgbe_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
-               return -ENOMEM;
-
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
+               goto err_tx_ring_allocation;
 
        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-                                  sizeof(struct ixgbe_ring), GFP_KERNEL);
-       if (!adapter->rx_ring) {
-               kfree(adapter->tx_ring);
-               return -ENOMEM;
+                                  sizeof(struct ixgbe_ring), GFP_KERNEL);
+       if (!adapter->rx_ring)
+               goto err_rx_ring_allocation;
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               adapter->tx_ring[i].count = adapter->tx_ring_count;
+               adapter->tx_ring[i].queue_index = i;
        }
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               adapter->rx_ring[i].adapter = adapter;
-               adapter->rx_ring[i].itr_register = IXGBE_EITR(i);
-               adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
+               adapter->rx_ring[i].count = adapter->rx_ring_count;
+               adapter->rx_ring[i].queue_index = i;
+       }
+
+       ixgbe_cache_ring_register(adapter);
+
+       return 0;
+
+err_rx_ring_allocation:
+       kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+       return -ENOMEM;
+}
+
+/**
+ * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
+                                                    *adapter)
+{
+       int err = 0;
+       int vector, v_budget;
+
+       /*
+        * It's easy to be greedy for MSI-X vectors, but it really
+        * doesn't do us much good if we have a lot more vectors
+        * than CPUs.  So let's be conservative and only ask for
+        * (roughly) twice the number of vectors as there are CPUs.
+        */
+       v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+                      (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+
+       /*
+        * At the same time, hardware can only support a maximum of
+        * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
+        * we can easily reach upwards of 64 Rx descriptor queues and
+        * 32 Tx queues.  Thus, we cap it off in those rare cases where
+        * the CPU count also exceeds our vector limit.
+        */
+       v_budget = min(v_budget, MAX_MSIX_COUNT);
+
+       /* A failure in MSI-X entry allocation isn't fatal, but it does
+        * mean we disable the MSI-X capability of the adapter. */
+       adapter->msix_entries = kcalloc(v_budget,
+                                       sizeof(struct msix_entry), GFP_KERNEL);
+       if (!adapter->msix_entries) {
+               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+               ixgbe_set_num_queues(adapter);
+               kfree(adapter->tx_ring);
+               kfree(adapter->rx_ring);
+               err = ixgbe_alloc_queues(adapter);
+               if (err) {
+                       DPRINTK(PROBE, ERR, "Unable to allocate memory "
+                               "for queues\n");
+                       goto out;
+               }
+
+               goto try_msi;
        }
 
+       for (vector = 0; vector < v_budget; vector++)
+               adapter->msix_entries[vector].entry = vector;
+
+       ixgbe_acquire_msix_vectors(adapter, v_budget);
+
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+               goto out;
+
+try_msi:
+       err = pci_enable_msi(adapter->pdev);
+       if (!err) {
+               adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+       } else {
+               DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
+                       "falling back to legacy.  Error: %d\n", err);
+               /* reset err */
+               err = 0;
+       }
+
+out:
+       /* Notify the stack of the (possibly) reduced Tx Queue count. */
+       adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
+
+       return err;
+}
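
To make the vector budget concrete: assuming, say, 8 online CPUs, 16 Rx plus 16 Tx queues, and NON_Q_VECTORS == 1 (an illustrative value), the two min() steps above work out to:

    v_budget = min(16 + 16, (int)(8 * 2)) + 1;  /* min(32, 16) + 1 = 17 */
    v_budget = min(v_budget, MAX_MSIX_COUNT);   /* unchanged while cap >= 17 */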
+
+static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+               pci_disable_msix(adapter->pdev);
+               kfree(adapter->msix_entries);
+               adapter->msix_entries = NULL;
+       } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+               adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+               pci_disable_msi(adapter->pdev);
+       }
+       return;
+}
+
+/**
+ * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ *   - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ *   - defined by miscellaneous hardware support/features (RSS, etc.)
+ **/
+static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+       int err;
+
+       /* Number of supported queues */
+       ixgbe_set_num_queues(adapter);
+
+       err = ixgbe_alloc_queues(adapter);
+       if (err) {
+               DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+               goto err_alloc_queues;
+       }
+
+       err = ixgbe_set_interrupt_capability(adapter);
+       if (err) {
+               DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
+               goto err_set_interrupt;
+       }
+
+       DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
+               "Tx Queue count = %u\n",
+               (adapter->num_rx_queues > 1) ? "Enabled" :
+               "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+       set_bit(__IXGBE_DOWN, &adapter->state);
+
        return 0;
+
+err_set_interrupt:
+       kfree(adapter->tx_ring);
+       kfree(adapter->rx_ring);
+err_alloc_queues:
+       return err;
 }
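
Pulling the pieces together, the bring-up above reduces to the following ladder (a summary of the code, not new behavior):

    /* ixgbe_init_interrupt_scheme():
     *   ixgbe_set_num_queues()           - size queue counts from features
     *   ixgbe_alloc_queues()             - allocate the ring arrays
     *   ixgbe_set_interrupt_capability() - try MSI-X; on failure drop RSS,
     *                                      reallocate single queues, try MSI,
     *                                      else fall back to legacy INTx
     */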
 
 /**
@@ -1560,39 +2550,53 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
+       unsigned int rss;
+
+       /* PCI config space info */
+
+       hw->vendor_id = pdev->vendor;
+       hw->device_id = pdev->device;
+       hw->revision_id = pdev->revision;
+       hw->subsystem_vendor_id = pdev->subsystem_vendor;
+       hw->subsystem_device_id = pdev->subsystem_device;
+
+       /* Set capability flags */
+       rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
+       adapter->ring_feature[RING_F_RSS].indices = rss;
+       adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
 
        /* default flow control settings */
-       hw->fc.original_type = ixgbe_fc_full;
-       hw->fc.type = ixgbe_fc_full;
+       hw->fc.original_type = ixgbe_fc_none;
+       hw->fc.type = ixgbe_fc_none;
+       hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
+       hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+       hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
+       hw->fc.send_xon = true;
 
+       /* select 10G link by default */
        hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
-       if (hw->mac.ops.reset(hw)) {
-               dev_err(&pdev->dev, "HW Init failed\n");
-               return -EIO;
-       }
-       if (hw->phy.ops.setup_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
-                                  false)) {
-               dev_err(&pdev->dev, "Link Speed setup failed\n");
-               return -EIO;
-       }
+
+       /* enable itr by default in dynamic mode */
+       adapter->itr_setting = 1;
+       adapter->eitr_param = 20000;
+
+       /* set defaults for eitr in MegaBytes */
+       adapter->eitr_low = 10;
+       adapter->eitr_high = 20;
+
+       /* set default ring sizes */
+       adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
+       adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 
        /* initialize eeprom parameters */
-       if (ixgbe_init_eeprom(hw)) {
+       if (ixgbe_init_eeprom_params_generic(hw)) {
                dev_err(&pdev->dev, "EEPROM initialization failed\n");
                return -EIO;
        }
 
-       /* Set the default values */
-       adapter->num_rx_queues = IXGBE_DEFAULT_RXQ;
-       adapter->num_tx_queues = 1;
+       /* enable rx csum by default */
        adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
 
-       if (ixgbe_alloc_queues(adapter)) {
-               dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
-               return -ENOMEM;
-       }
-
-       atomic_set(&adapter->irq_sem, 1);
        set_bit(__IXGBE_DOWN, &adapter->state);
 
        return 0;
@@ -1601,88 +2605,146 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 /**
  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
  * @adapter: board private structure
- * @txdr:    tx descriptor ring (for a specific queue) to setup
+ * @tx_ring:    tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *txdr)
+                             struct ixgbe_ring *tx_ring)
 {
        struct pci_dev *pdev = adapter->pdev;
        int size;
 
-       size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
-       txdr->tx_buffer_info = vmalloc(size);
-       if (!txdr->tx_buffer_info) {
-               DPRINTK(PROBE, ERR,
-               "Unable to allocate memory for the transmit descriptor ring\n");
-               return -ENOMEM;
-       }
-       memset(txdr->tx_buffer_info, 0, size);
+       size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
+       tx_ring->tx_buffer_info = vmalloc(size);
+       if (!tx_ring->tx_buffer_info)
+               goto err;
+       memset(tx_ring->tx_buffer_info, 0, size);
 
        /* round up to nearest 4K */
-       txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
-       txdr->size = ALIGN(txdr->size, 4096);
+       tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
+                       sizeof(u32);
+       tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-       txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
-       if (!txdr->desc) {
-               vfree(txdr->tx_buffer_info);
-               DPRINTK(PROBE, ERR,
-                       "Memory allocation failed for the tx desc ring\n");
-               return -ENOMEM;
-       }
-
-       txdr->adapter = adapter;
-       txdr->next_to_use = 0;
-       txdr->next_to_clean = 0;
-       txdr->work_limit = txdr->count;
-       spin_lock_init(&txdr->tx_lock);
+       tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+                                            &tx_ring->dma);
+       if (!tx_ring->desc)
+               goto err;
 
+       tx_ring->next_to_use = 0;
+       tx_ring->next_to_clean = 0;
+       tx_ring->work_limit = tx_ring->count;
        return 0;
+
+err:
+       vfree(tx_ring->tx_buffer_info);
+       tx_ring->tx_buffer_info = NULL;
+       DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
+                           "descriptor ring\n");
+       return -ENOMEM;
+}
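
To see what the 4K rounding buys: with, say, 512 descriptors (an illustrative count) and the 16-byte advanced Tx descriptor, the extra u32 appended above tips the size just past two pages:

    size = 512 * sizeof(union ixgbe_adv_tx_desc) + sizeof(u32);
                                    /* 512 * 16 + 4 = 8196 bytes */
    size = ALIGN(size, 4096);       /* rounded up to 12288 (three pages) */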
+
+/**
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+       int i, err = 0;
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+               if (!err)
+                       continue;
+               DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
+               break;
+       }
+
+       return err;
 }
 
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: board private structure
- * @rxdr:    rx descriptor ring (for a specific queue) to setup
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *rxdr)
+                             struct ixgbe_ring *rx_ring)
 {
        struct pci_dev *pdev = adapter->pdev;
-       int size, desc_len;
+       int size;
 
-       size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
-       rxdr->rx_buffer_info = vmalloc(size);
-       if (!rxdr->rx_buffer_info) {
-               DPRINTK(PROBE, ERR,
-                       "vmalloc allocation failed for the rx desc ring\n");
+       size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
+       rx_ring->lro_mgr.lro_arr = vmalloc(size);
+       if (!rx_ring->lro_mgr.lro_arr)
                return -ENOMEM;
-       }
-       memset(rxdr->rx_buffer_info, 0, size);
+       memset(rx_ring->lro_mgr.lro_arr, 0, size);
 
-       desc_len = sizeof(union ixgbe_adv_rx_desc);
+       size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+       rx_ring->rx_buffer_info = vmalloc(size);
+       if (!rx_ring->rx_buffer_info) {
+               DPRINTK(PROBE, ERR,
+                       "vmalloc allocation failed for the rx desc ring\n");
+               goto alloc_failed;
+       }
+       memset(rx_ring->rx_buffer_info, 0, size);
 
        /* Round up to nearest 4K */
-       rxdr->size = rxdr->count * desc_len;
-       rxdr->size = ALIGN(rxdr->size, 4096);
+       rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+       rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-       rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+       rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+                                            &rx_ring->dma);
 
-       if (!rxdr->desc) {
+       if (!rx_ring->desc) {
                DPRINTK(PROBE, ERR,
-                       "Memory allocation failed for the rx desc ring\n");
-               vfree(rxdr->rx_buffer_info);
-               return -ENOMEM;
+                       "Memory allocation failed for the rx desc ring\n");
+               vfree(rx_ring->rx_buffer_info);
+               goto alloc_failed;
        }
 
-       rxdr->next_to_clean = 0;
-       rxdr->next_to_use = 0;
-       rxdr->adapter = adapter;
+       rx_ring->next_to_clean = 0;
+       rx_ring->next_to_use = 0;
 
        return 0;
+
+alloc_failed:
+       vfree(rx_ring->lro_mgr.lro_arr);
+       rx_ring->lro_mgr.lro_arr = NULL;
+       return -ENOMEM;
+}
+
+/**
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+       int i, err = 0;
+
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+               if (!err)
+                       continue;
+               DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
+               break;
+       }
+
+       return err;
 }
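
The "orphaned rings" caveat in both kernel-doc blocks above is discharged by the callers' unwind paths; a condensed sketch of the expected caller (the label name is illustrative, mirroring the error handling in ixgbe_open() below):

    err = ixgbe_setup_all_rx_resources(adapter);
    if (err) {
            /* some rings may have been set up; free them all */
            ixgbe_free_all_rx_resources(adapter);
            goto err_setup_rx;
    }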
 
 /**
@@ -1692,8 +2754,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
  *
  * Free all transmit software resources
  **/
-static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
-                                   struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
+                             struct ixgbe_ring *tx_ring)
 {
        struct pci_dev *pdev = adapter->pdev;
 
@@ -1728,11 +2790,14 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
  *
  * Free all receive software resources
  **/
-static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
-                                   struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
+                             struct ixgbe_ring *rx_ring)
 {
        struct pci_dev *pdev = adapter->pdev;
 
+       vfree(rx_ring->lro_mgr.lro_arr);
+       rx_ring->lro_mgr.lro_arr = NULL;
+
        ixgbe_clean_rx_ring(adapter, rx_ring);
 
        vfree(rx_ring->rx_buffer_info);
@@ -1751,65 +2816,10 @@ static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
  **/
 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 {
-       int i;
-
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
-}
-
-/**
- * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources
- *                               (Descriptors) for all queues
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
-{
-       int i, err = 0;
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
-               if (err) {
-                       DPRINTK(PROBE, ERR,
-                               "Allocation for Tx Queue %u failed\n", i);
-                       break;
-               }
-       }
-
-       return err;
-}
-
-/**
- * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources
- *                               (Descriptors) for all queues
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-
-static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
-{
-       int i, err = 0;
-
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
-               if (err) {
-                       DPRINTK(PROBE, ERR,
-                               "Allocation for Rx Queue %u failed\n", i);
-                       break;
-               }
-       }
+       int i;
 
-       return err;
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
 }
 
 /**
@@ -1824,16 +2834,17 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-       if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
-           (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+       /* MTU < 68 is an error and causes problems on some kernels */
+       if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
                return -EINVAL;
 
+       DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
+               netdev->mtu, new_mtu);
+       /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
 
-       if (netif_running(netdev)) {
-               ixgbe_down(adapter);
-               ixgbe_up(adapter);
-       }
+       if (netif_running(netdev))
+               ixgbe_reinit_locked(adapter);
 
        return 0;
 }
@@ -1854,25 +2865,16 @@ static int ixgbe_open(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int err;
-       u32 ctrl_ext;
-       u32 num_rx_queues = adapter->num_rx_queues;
 
-       /* Let firmware know the driver has taken over */
-       ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-                       ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+       /* disallow open during test */
+       if (test_bit(__IXGBE_TESTING, &adapter->state))
+               return -EBUSY;
 
-try_intr_reinit:
        /* allocate transmit descriptors */
        err = ixgbe_setup_all_tx_resources(adapter);
        if (err)
                goto err_setup_tx;
 
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
-               num_rx_queues = 1;
-               adapter->num_rx_queues = num_rx_queues;
-       }
-
        /* allocate receive descriptors */
        err = ixgbe_setup_all_rx_resources(adapter);
        if (err)
@@ -1880,38 +2882,20 @@ try_intr_reinit:
 
        ixgbe_configure(adapter);
 
-       err = ixgbe_request_irq(adapter, &num_rx_queues);
+       err = ixgbe_request_irq(adapter);
        if (err)
                goto err_req_irq;
 
-       /* ixgbe_request might have reduced num_rx_queues */
-       if (num_rx_queues < adapter->num_rx_queues) {
-               /* We didn't get MSI-X, so we need to release everything,
-                * set our Rx queue count to num_rx_queues, and redo the
-                * whole init process.
-                */
-               ixgbe_free_irq(adapter);
-               if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
-                       pci_disable_msi(adapter->pdev);
-                       adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
-               }
-               ixgbe_free_all_rx_resources(adapter);
-               ixgbe_free_all_tx_resources(adapter);
-               adapter->num_rx_queues = num_rx_queues;
-
-               /* Reset the hardware, and start over. */
-               ixgbe_reset(adapter);
-
-               goto try_intr_reinit;
-       }
-
        err = ixgbe_up_complete(adapter);
        if (err)
                goto err_up;
 
+       netif_tx_start_all_queues(netdev);
+
        return 0;
 
 err_up:
+       ixgbe_release_hw_control(adapter);
        ixgbe_free_irq(adapter);
 err_req_irq:
        ixgbe_free_all_rx_resources(adapter);
@@ -1937,7 +2921,6 @@ err_setup_tx:
 static int ixgbe_close(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       u32 ctrl_ext;
 
        ixgbe_down(adapter);
        ixgbe_free_irq(adapter);
@@ -1945,13 +2928,140 @@ static int ixgbe_close(struct net_device *netdev)
        ixgbe_free_all_tx_resources(adapter);
        ixgbe_free_all_rx_resources(adapter);
 
-       ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-                       ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+       ixgbe_release_hw_control(adapter);
+
+       return 0;
+}
+
+/**
+ * ixgbe_napi_add_all - prep napi structs for use
+ * @adapter: private struct
+ *
+ * Helper function to napi_add each possible q_vector->napi
+ */
+static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
+{
+       int q_idx, q_vectors;
+       int (*poll)(struct napi_struct *, int);
+
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               poll = &ixgbe_clean_rxonly;
+               /* Only enable as many vectors as we have rx queues. */
+               q_vectors = adapter->num_rx_queues;
+       } else {
+               poll = &ixgbe_poll;
+               /* only one q_vector for legacy modes */
+               q_vectors = 1;
+       }
+
+       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+               struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
+               netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
+       }
+}
+
+static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
+{
+       int q_idx;
+       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+       /* legacy and MSI only use one vector */
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+               q_vectors = 1;
+
+       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+               struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
+               if (!q_vector->rxr_count)
+                       continue;
+               netif_napi_del(&q_vector->napi);
+       }
+}
+
+#ifdef CONFIG_PM
+static int ixgbe_resume(struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       int err;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       err = pci_enable_device(pdev);
+       if (err) {
+               printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
+                               "suspend\n");
+               return err;
+       }
+       pci_set_master(pdev);
+
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+       pci_enable_wake(pdev, PCI_D3cold, 0);
+
+       err = ixgbe_init_interrupt_scheme(adapter);
+       if (err) {
+               printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
+                               "device\n");
+               return err;
+       }
+
+       ixgbe_napi_add_all(adapter);
+       ixgbe_reset(adapter);
+
+       if (netif_running(netdev)) {
+               err = ixgbe_open(adapter->netdev);
+               if (err)
+                       return err;
+       }
+
+       netif_device_attach(netdev);
+
+       return 0;
+}
+
+#endif /* CONFIG_PM */
+static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM
+       int retval = 0;
+#endif
+
+       netif_device_detach(netdev);
+
+       if (netif_running(netdev)) {
+               ixgbe_down(adapter);
+               ixgbe_free_irq(adapter);
+               ixgbe_free_all_tx_resources(adapter);
+               ixgbe_free_all_rx_resources(adapter);
+       }
+       ixgbe_reset_interrupt_capability(adapter);
+       ixgbe_napi_del_all(adapter);
+       kfree(adapter->tx_ring);
+       kfree(adapter->rx_ring);
+
+#ifdef CONFIG_PM
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
+#endif
+
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+       pci_enable_wake(pdev, PCI_D3cold, 0);
+
+       ixgbe_release_hw_control(adapter);
+
+       pci_disable_device(pdev);
+
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
        return 0;
 }
 
+static void ixgbe_shutdown(struct pci_dev *pdev)
+{
+       ixgbe_suspend(pdev, PMSG_SUSPEND);
+}
+
 /**
  * ixgbe_update_stats - Update the board statistics counters.
  * @adapter: board private structure
@@ -1959,22 +3069,26 @@ static int ixgbe_close(struct net_device *netdev)
 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       u64 good_rx, missed_rx, bprc;
+       u64 total_mpc = 0;
+       u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 
        adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
-       good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);
-       missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));
-       adapter->stats.gprc += (good_rx - missed_rx);
-
-       adapter->stats.mpc[0] += missed_rx;
+       for (i = 0; i < 8; i++) {
+               /* for packet buffers not used, the register should read 0 */
+               mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+               missed_rx += mpc;
+               adapter->stats.mpc[i] += mpc;
+               total_mpc += adapter->stats.mpc[i];
+               adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+       }
+       adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+       /* work around hardware counting issue */
+       adapter->stats.gprc -= missed_rx;
+
+       /* 82598 hardware only has a 32 bit counter in the high register */
        adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+       adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+       adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        adapter->stats.bprc += bprc;
        adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
@@ -1986,45 +3100,46 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
        adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
        adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
-
        adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
        adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-       adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
        adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-       adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+       lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+       adapter->stats.lxontxc += lxon;
+       lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+       adapter->stats.lxofftxc += lxoff;
        adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
        adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
-       adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-       adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));
+       adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+       /*
+        * 82598 errata - tx of flow control packets is included in tx counters
+        */
+       xon_off_tot = lxon + lxoff;
+       adapter->stats.gptc -= xon_off_tot;
+       adapter->stats.mptc -= xon_off_tot;
+       adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
        adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
        adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
        adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
-       adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
        adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
        adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+       adapter->stats.ptc64 -= xon_off_tot;
        adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
        adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
        adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
        adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
        adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
-       adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
        adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
 
        /* Fill out the OS statistics structure */
-       adapter->net_stats.rx_packets = adapter->stats.gprc;
-       adapter->net_stats.tx_packets = adapter->stats.gptc;
-       adapter->net_stats.rx_bytes = adapter->stats.gorc;
-       adapter->net_stats.tx_bytes = adapter->stats.gotc;
        adapter->net_stats.multicast = adapter->stats.mprc;
 
        /* Rx Errors */
        adapter->net_stats.rx_errors = adapter->stats.crcerrs +
-                                               adapter->stats.rlec;
+                                      adapter->stats.rlec;
        adapter->net_stats.rx_dropped = 0;
        adapter->net_stats.rx_length_errors = adapter->stats.rlec;
        adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-       adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0];
-
+       adapter->net_stats.rx_missed_errors = total_mpc;
 }
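
A quick sanity check on the errata arithmetic above: each transmitted XON/XOFF frame is a minimum-size Ethernet frame, so the byte counter is walked back in step with the packet counters:

    /* per pause frame: ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 bytes,
     * hence gotc -= xon_off_tot * 64 alongside gptc/mptc -= xon_off_tot */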
 
 /**
@@ -2034,70 +3149,105 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 static void ixgbe_watchdog(unsigned long data)
 {
        struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
-       struct net_device *netdev = adapter->netdev;
-       bool link_up;
-       u32 link_speed = 0;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       /* Do the watchdog outside of interrupt context due to the lovely
+        * delays that some of the newer hardware requires */
+       if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+               /* Cause software interrupt to ensure rx rings are cleaned */
+               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+                       u32 eics =
+                        (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
+                       IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
+               } else {
+                       /* For legacy and MSI interrupts don't set any bits that
+                        * are enabled for EIAM, because this operation would
+                        * set *both* EIMS and EICS for any bit in EIAM */
+                       IXGBE_WRITE_REG(hw, IXGBE_EICS,
+                                    (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+               }
+               /* Reset the timer */
+               mod_timer(&adapter->watchdog_timer,
+                         round_jiffies(jiffies + 2 * HZ));
+       }
+
+       schedule_work(&adapter->watchdog_task);
+}
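
The EICS write above software-fires one interrupt per queue vector. For instance, assuming 5 MSI-X vectors with NON_Q_VECTORS == 1 (illustrative numbers), the mask covers the four queue vectors:

    eics = (1 << (5 - 1)) - 1;              /* 0xF: bits 0-3, one per vector */
    IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);  /* rings get cleaned on the kick */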
 
-       adapter->hw.phy.ops.check(&adapter->hw, &(link_speed), &link_up);
+/**
+ * ixgbe_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_watchdog_task(struct work_struct *work)
+{
+       struct ixgbe_adapter *adapter = container_of(work,
+                                                    struct ixgbe_adapter,
+                                                    watchdog_task);
+       struct net_device *netdev = adapter->netdev;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 link_speed = adapter->link_speed;
+       bool link_up = adapter->link_up;
+
+       adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+       if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+               hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+               if (link_up ||
+                   time_after(jiffies, (adapter->link_check_timeout +
+                                        IXGBE_TRY_LINK_TIMEOUT))) {
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
+                       adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+               }
+               adapter->link_up = link_up;
+               adapter->link_speed = link_speed;
+       }
 
        if (link_up) {
                if (!netif_carrier_ok(netdev)) {
-                       u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
-                       u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
+                       u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+                       u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
 #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
 #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
                        DPRINTK(LINK, INFO, "NIC Link is Up %s, "
-                               "Flow Control: %s\n",
-                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
-                                "10 Gbps" :
-                                (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
-                                 "1 Gpbs" : "unknown speed")),
-                               ((FLOW_RX && FLOW_TX) ? "RX/TX" :
-                                (FLOW_RX ? "RX" :
-                                (FLOW_TX ? "TX" : "None"))));
+                               "Flow Control: %s\n",
+                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
+                                "10 Gbps" :
+                                (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
+                                 "1 Gbps" : "unknown speed")),
+                               ((FLOW_RX && FLOW_TX) ? "RX/TX" :
+                                (FLOW_RX ? "RX" :
+                                (FLOW_TX ? "TX" : "None"))));
 
                        netif_carrier_on(netdev);
-                       netif_wake_queue(netdev);
+                       netif_tx_wake_all_queues(netdev);
                } else {
                        /* Force detection of hung controller */
                        adapter->detect_tx_hung = true;
                }
        } else {
+               adapter->link_up = false;
+               adapter->link_speed = 0;
                if (netif_carrier_ok(netdev)) {
                        DPRINTK(LINK, INFO, "NIC Link is Down\n");
                        netif_carrier_off(netdev);
-                       netif_stop_queue(netdev);
+                       netif_tx_stop_all_queues(netdev);
                }
        }
 
        ixgbe_update_stats(adapter);
-
-       /* Reset the timer */
-       if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               mod_timer(&adapter->watchdog_timer,
-                         round_jiffies(jiffies + 2 * HZ));
+       adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
 }
 
-#define IXGBE_MAX_TXD_PWR      14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-                        (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-       MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)   /* for context */
-
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
-                        struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-                        u32 tx_flags, u8 *hdr_len)
+                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+                     u32 tx_flags, u8 *hdr_len)
 {
        struct ixgbe_adv_tx_context_desc *context_desc;
        unsigned int i;
        int err;
        struct ixgbe_tx_buffer *tx_buffer_info;
-       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-       u32 mss_l4len_idx = 0, l4len;
-       *hdr_len = 0;
+       u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+       u32 mss_l4len_idx, l4len;
 
        if (skb_is_gso(skb)) {
                if (skb_header_cloned(skb)) {
@@ -2108,21 +3258,21 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                l4len = tcp_hdrlen(skb);
                *hdr_len += l4len;
 
-               if (skb->protocol == ntohs(ETH_P_IP)) {
+               if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);
                        iph->tot_len = 0;
                        iph->check = 0;
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                                iph->daddr, 0,
-                                                                IPPROTO_TCP,
-                                                                0);
+                                                                iph->daddr, 0,
+                                                                IPPROTO_TCP,
+                                                                0);
                        adapter->hw_tso_ctxt++;
                } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                            &ipv6_hdr(skb)->daddr,
-                                            0, IPPROTO_TCP, 0);
+                                            &ipv6_hdr(skb)->daddr,
+                                            0, IPPROTO_TCP, 0);
                        adapter->hw_tso6_ctxt++;
                }
 
@@ -2136,7 +3286,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                        vlan_macip_lens |=
                            (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
                vlan_macip_lens |= ((skb_network_offset(skb)) <<
-                                   IXGBE_ADVTXD_MACLEN_SHIFT);
+                                   IXGBE_ADVTXD_MACLEN_SHIFT);
                *hdr_len += skb_network_offset(skb);
                vlan_macip_lens |=
                    (skb_transport_header(skb) - skb_network_header(skb));
@@ -2146,18 +3296,20 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                context_desc->seqnum_seed = 0;
 
                /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-               type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-                                   IXGBE_ADVTXD_DTYP_CTXT);
+               type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
+                                  IXGBE_ADVTXD_DTYP_CTXT);
 
-               if (skb->protocol == ntohs(ETH_P_IP))
+               if (skb->protocol == htons(ETH_P_IP))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
 
                /* MSS L4LEN IDX */
-               mss_l4len_idx |=
+               mss_l4len_idx =
                    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
                mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+               /* use index 1 for TSO */
+               mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
                context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
 
                tx_buffer_info->time_stamp = jiffies;
@@ -2174,8 +3326,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 }
 
 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
-                                  struct ixgbe_ring *tx_ring,
-                                  struct sk_buff *skb, u32 tx_flags)
+                          struct ixgbe_ring *tx_ring,
+                          struct sk_buff *skb, u32 tx_flags)
 {
        struct ixgbe_adv_tx_context_desc *context_desc;
        unsigned int i;
@@ -2192,30 +3344,48 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                        vlan_macip_lens |=
                            (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
                vlan_macip_lens |= (skb_network_offset(skb) <<
-                                   IXGBE_ADVTXD_MACLEN_SHIFT);
+                                   IXGBE_ADVTXD_MACLEN_SHIFT);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        vlan_macip_lens |= (skb_transport_header(skb) -
-                                           skb_network_header(skb));
+                                           skb_network_header(skb));
 
                context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
                context_desc->seqnum_seed = 0;
 
                type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-                                   IXGBE_ADVTXD_DTYP_CTXT);
+                                   IXGBE_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       if (skb->protocol == ntohs(ETH_P_IP))
+                       switch (skb->protocol) {
+                       case __constant_htons(ETH_P_IP):
                                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-
-                       if (skb->sk->sk_protocol == IPPROTO_TCP)
-                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+                               if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+                                       type_tucmd_mlhl |=
+                                               IXGBE_ADVTXD_TUCMD_L4T_TCP;
+                               break;
+                       case __constant_htons(ETH_P_IPV6):
+                               /* XXX what about other V6 headers?? */
+                               if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+                                       type_tucmd_mlhl |=
+                                               IXGBE_ADVTXD_TUCMD_L4T_TCP;
+                               break;
+                       default:
+                               if (unlikely(net_ratelimit())) {
+                                       DPRINTK(PROBE, WARNING,
+                                        "partial checksum but proto=%x!\n",
+                                        skb->protocol);
+                               }
+                               break;
+                       }
                }
 
                context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+               /* use index zero for tx checksum offload */
                context_desc->mss_l4len_idx = 0;
 
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
+
                adapter->hw_csum_tx_good++;
                i++;
                if (i == tx_ring->count)
@@ -2224,12 +3394,13 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 
                return true;
        }
+
        return false;
 }
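
A side note on the switch introduced above: case labels must be integer constant expressions, which is why the protocol values are wrapped in __constant_htons() rather than plain htons(), whose constant folding could not be relied on with older compilers:

    /* case __constant_htons(ETH_P_IP): folds the byte swap at build time -
     * the label is the constant 0x0008 on little-endian, 0x0800 on big. */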
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
-                       struct ixgbe_ring *tx_ring,
-                       struct sk_buff *skb, unsigned int first)
+                        struct ixgbe_ring *tx_ring,
+                        struct sk_buff *skb, unsigned int first)
 {
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int len = skb->len;
@@ -2247,8 +3418,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
                tx_buffer_info->length = size;
                tx_buffer_info->dma = pci_map_single(adapter->pdev,
-                                                 skb->data + offset,
-                                                 size, PCI_DMA_TODEVICE);
+                                                    skb->data + offset,
+                                                    size, PCI_DMA_TODEVICE);
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
 
@@ -2273,9 +3444,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
                        tx_buffer_info->length = size;
                        tx_buffer_info->dma = pci_map_page(adapter->pdev,
-                                                       frag->page,
-                                                       offset,
-                                                       size, PCI_DMA_TODEVICE);
+                                                          frag->page,
+                                                          offset,
+                                                          size,
+                                                          PCI_DMA_TODEVICE);
                        tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
 
@@ -2298,8 +3470,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
-                              struct ixgbe_ring *tx_ring,
-                              int tx_flags, int count, u32 paylen, u8 hdr_len)
+                           struct ixgbe_ring *tx_ring,
+                           int tx_flags, int count, u32 paylen, u8 hdr_len)
 {
        union ixgbe_adv_tx_desc *tx_desc = NULL;
        struct ixgbe_tx_buffer *tx_buffer_info;
@@ -2318,15 +3490,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
                cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
 
                olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-                                               IXGBE_ADVTXD_POPTS_SHIFT;
+                                IXGBE_ADVTXD_POPTS_SHIFT;
 
+               /* use index 1 context for tso */
+               olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
                if (tx_flags & IXGBE_TX_FLAGS_IPV4)
                        olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
-                                               IXGBE_ADVTXD_POPTS_SHIFT;
+                                        IXGBE_ADVTXD_POPTS_SHIFT;
 
        } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
                olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-                                               IXGBE_ADVTXD_POPTS_SHIFT;
+                                IXGBE_ADVTXD_POPTS_SHIFT;
 
        olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
@@ -2336,9 +3510,8 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
                tx_desc->read.cmd_type_len =
-                       cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+                       cpu_to_le32(cmd_type_len | tx_buffer_info->length);
                tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-
                i++;
                if (i == tx_ring->count)
                        i = 0;
@@ -2358,53 +3531,71 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
        writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
 
+static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
+                                 struct ixgbe_ring *tx_ring, int size)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+       netif_stop_subqueue(netdev, tx_ring->queue_index);
+       /* Herbert's original patch had:
+        *  smp_mb__after_netif_stop_queue();
+        * but since that doesn't exist yet, just open code it. */
+       smp_mb();
+
+       /* We need to check again in case another CPU has just
+        * made room available. */
+       if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+               return -EBUSY;
+
+       /* A reprieve! - use start_queue because it doesn't call schedule */
+       netif_start_subqueue(netdev, tx_ring->queue_index);
+       ++adapter->restart_queue;
+       return 0;
+}
+
+static int ixgbe_maybe_stop_tx(struct net_device *netdev,
+                              struct ixgbe_ring *tx_ring, int size)
+{
+       if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+               return 0;
+       return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+}
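
The stop-then-recheck pair above is the standard lockless handshake between the xmit path and Tx cleanup. A condensed sketch of the race the smp_mb() closes (illustrative, not driver code):

    /*   producer (xmit)                   consumer (clean_tx_irq)
     *   sees the ring as full             frees a batch of descriptors
     *   netif_stop_subqueue()             checks the queue: still running,
     *   smp_mb()                            so it does not wake it
     *   re-reads free count: room now --> producer restarts the queue itself
     */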
+
 static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_ring *tx_ring;
-       unsigned int len = skb->len;
        unsigned int first;
        unsigned int tx_flags = 0;
-       unsigned long flags = 0;
-       u8 hdr_len;
-       int tso;
-       unsigned int mss = 0;
+       u8 hdr_len = 0;
+       int r_idx = 0, tso;
        int count = 0;
        unsigned int f;
-       unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-       len -= skb->data_len;
 
-       tx_ring = adapter->tx_ring;
+       r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
+       tx_ring = &adapter->tx_ring[r_idx];
 
-       if (skb->len <= 0) {
-               dev_kfree_skb(skb);
-               return NETDEV_TX_OK;
+       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+               tx_flags |= vlan_tx_tag_get(skb);
+               tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+               tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }
-       mss = skb_shinfo(skb)->gso_size;
-
-       if (mss)
-               count++;
-       else if (skb->ip_summed == CHECKSUM_PARTIAL)
+       /* three things can cause us to need a context descriptor */
+       if (skb_is_gso(skb) ||
+           (skb->ip_summed == CHECKSUM_PARTIAL) ||
+           (tx_flags & IXGBE_TX_FLAGS_VLAN))
                count++;
 
-       count += TXD_USE_COUNT(len);
-       for (f = 0; f < nr_frags; f++)
+       count += TXD_USE_COUNT(skb_headlen(skb));
+       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
-       spin_lock_irqsave(&tx_ring->tx_lock, flags);
-       if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) {
+       if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
                adapter->tx_busy++;
-               netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
-       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
-               tx_flags |= IXGBE_TX_FLAGS_VLAN;
-               tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
-       }
 
-       if (skb->protocol == ntohs(ETH_P_IP))
+       if (skb->protocol == htons(ETH_P_IP))
                tx_flags |= IXGBE_TX_FLAGS_IPV4;
        first = tx_ring->next_to_use;
        tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
@@ -2416,20 +3607,16 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        if (tso)
                tx_flags |= IXGBE_TX_FLAGS_TSO;
        else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
-                (skb->ip_summed == CHECKSUM_PARTIAL))
+                (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IXGBE_TX_FLAGS_CSUM;
 
        ixgbe_tx_queue(adapter, tx_ring, tx_flags,
-                          ixgbe_tx_map(adapter, tx_ring, skb, first),
-                          skb->len, hdr_len);
+                      ixgbe_tx_map(adapter, tx_ring, skb, first),
+                      skb->len, hdr_len);
 
        netdev->trans_start = jiffies;
 
-       spin_lock_irqsave(&tx_ring->tx_lock, flags);
-       /* Make sure there is space in the ring for the next send. */
-       if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED)
-               netif_stop_queue(netdev);
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+       ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
        return NETDEV_TX_OK;
 }
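
One subtlety in the queue selection at the top of ixgbe_xmit_frame(): masking with (num_tx_queues - 1) always yields a valid ring index, but it only spreads queue_mapping evenly when the queue count is a power of two. With, say, 8 Tx queues it degenerates to a 3-bit mask:

    r_idx = (8 - 1) & skb->queue_mapping;   /* 0x7: spreads across rings 0-7 */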
@@ -2459,15 +3646,16 @@ static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
 static int ixgbe_set_mac(struct net_device *netdev, void *p)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-       memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+       memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-       ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 
        return 0;
 }
@@ -2491,6 +3679,22 @@ static void ixgbe_netpoll(struct net_device *netdev)
 #endif
 
 /**
+ * ixgbe_link_config - set up initial link with default speed and duplex
+ * @hw: pointer to private hardware struct
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_link_config(struct ixgbe_hw *hw)
+{
+       u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;
+
+       /* must always autoneg for both 1G and 10G link */
+       hw->mac.autoneg = true;
+
+       return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
+}
+
+/**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
  * @ent: entry in ixgbe_pci_tbl
@@ -2502,17 +3706,16 @@ static void ixgbe_netpoll(struct net_device *netdev)
  * and a hardware reset occur.
  **/
 static int __devinit ixgbe_probe(struct pci_dev *pdev,
-                                const struct pci_device_id *ent)
+                                 const struct pci_device_id *ent)
 {
        struct net_device *netdev;
        struct ixgbe_adapter *adapter = NULL;
        struct ixgbe_hw *hw;
        const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
-       unsigned long mmio_start, mmio_len;
        static int cards_found;
        int i, err, pci_using_dac;
        u16 link_status, link_speed, link_width;
-       u32 part_num;
+       u32 part_num, eec;
 
        err = pci_enable_device(pdev);
        if (err)
@@ -2527,7 +3730,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                        err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
-                                       "configuration, aborting\n");
+                                       "configuration, aborting\n");
                                goto err_dma;
                        }
                }
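
The hunk above only touches the 32-bit fallback path. For context, a sketch of the full DMA mask negotiation it belongs to, standard for drivers of this period (surrounding lines are elided by the hunk):

err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
if (!err) {
	err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	pci_using_dac = 1;              /* device can DMA above 4GB */
} else {
	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "No usable DMA "
			        "configuration, aborting\n");
			goto err_dma;
		}
	}
	pci_using_dac = 0;              /* restrict DMA to 32-bit space */
}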
@@ -2541,8 +3744,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        }
 
        pci_set_master(pdev);
+       pci_save_state(pdev);
 
-       netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
+       netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
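
alloc_etherdev_mq() sizes the net_device for MAX_TX_QUEUES TX subqueues and reserves the adapter private area in the same allocation. The glue between this hunk and the next presumably follows the usual pattern (assumed, elided by the hunk boundary):

SET_NETDEV_DEV(netdev, &pdev->dev);   /* tie netdev to the PCI device */
pci_set_drvdata(pdev, netdev);

adapter = netdev_priv(netdev);        /* private area behind net_device */
adapter->netdev = netdev;
adapter->pdev = pdev;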
@@ -2559,10 +3763,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        hw->back = adapter;
        adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 
-       mmio_start = pci_resource_start(pdev, 0);
-       mmio_len = pci_resource_len(pdev, 0);
-
-       hw->hw_addr = ioremap(mmio_start, mmio_len);
+       hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+                             pci_resource_len(pdev, 0));
        if (!hw->hw_addr) {
                err = -EIO;
                goto err_ioremap;
@@ -2577,13 +3779,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        netdev->stop = &ixgbe_close;
        netdev->hard_start_xmit = &ixgbe_xmit_frame;
        netdev->get_stats = &ixgbe_get_stats;
-       netdev->set_multicast_list = &ixgbe_set_multi;
+       netdev->set_rx_mode = &ixgbe_set_rx_mode;
+       netdev->set_multicast_list = &ixgbe_set_rx_mode;
        netdev->set_mac_address = &ixgbe_set_mac;
        netdev->change_mtu = &ixgbe_change_mtu;
        ixgbe_set_ethtool_ops(netdev);
        netdev->tx_timeout = &ixgbe_tx_timeout;
        netdev->watchdog_timeo = 5 * HZ;
-       netif_napi_add(netdev, &adapter->napi, ixgbe_clean, 64);
        netdev->vlan_rx_register = ixgbe_vlan_rx_register;
        netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
        netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
@@ -2592,21 +3794,22 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 #endif
        strcpy(netdev->name, pci_name(pdev));
 
-       netdev->mem_start = mmio_start;
-       netdev->mem_end = mmio_start + mmio_len;
-
        adapter->bd_number = cards_found;
 
-       /* PCI config space info */
-       hw->vendor_id = pdev->vendor;
-       hw->device_id = pdev->device;
-       hw->revision_id = pdev->revision;
-       hw->subsystem_vendor_id = pdev->subsystem_vendor;
-       hw->subsystem_device_id = pdev->subsystem_device;
-
        /* Setup hw api */
        memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+       hw->mac.type  = ii->mac;
+
+       /* EEPROM */
+       memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+       eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+       /* If EEPROM is valid (bit 8 = 1), use default; otherwise use bit-bang */
+       if (!(eec & (1 << 8)))
+               hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+
+       /* PHY */
        memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+       /* phy->sfp_type = ixgbe_sfp_type_unknown; */
 
        err = ii->get_invariants(hw);
        if (err)
@@ -2617,21 +3820,34 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        if (err)
                goto err_sw_init;
 
+       /* reset_hw fills in the perm_addr as well */
+       err = hw->mac.ops.reset_hw(hw);
+       if (err) {
+               dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
+               goto err_sw_init;
+       }
+
        netdev->features = NETIF_F_SG |
-                          NETIF_F_HW_CSUM |
-                          NETIF_F_HW_VLAN_TX |
-                          NETIF_F_HW_VLAN_RX |
-                          NETIF_F_HW_VLAN_FILTER;
+                          NETIF_F_IP_CSUM |
+                          NETIF_F_HW_VLAN_TX |
+                          NETIF_F_HW_VLAN_RX |
+                          NETIF_F_HW_VLAN_FILTER;
 
+       netdev->features |= NETIF_F_IPV6_CSUM;
        netdev->features |= NETIF_F_TSO;
-
        netdev->features |= NETIF_F_TSO6;
+       netdev->features |= NETIF_F_LRO;
+
+       netdev->vlan_features |= NETIF_F_TSO;
+       netdev->vlan_features |= NETIF_F_TSO6;
+       netdev->vlan_features |= NETIF_F_IP_CSUM;
+       netdev->vlan_features |= NETIF_F_SG;
+
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
-
        /* make sure the EEPROM is good */
-       if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
+       if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
                dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
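
Both the bit-bang override earlier and the checksum validation here go through hw->eeprom.ops. An illustrative (not verbatim) subset of that ops table:

struct ixgbe_eeprom_operations {
	s32 (*read)(struct ixgbe_hw *hw, u16 offset, u16 *data);
	/* returns negative on a bad checksum; can hand the computed
	 * checksum back through the pointer (NULL in the call above) */
	s32 (*validate_checksum)(struct ixgbe_hw *hw, u16 *checksum_val);
	/* ... */
};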
@@ -2640,7 +3856,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
 
-       if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
+       if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
+               dev_err(&pdev->dev, "invalid MAC address\n");
                err = -EIO;
                goto err_eeprom;
        }
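
ixgbe_validate_mac_addr() is the hardware layer's helper; its checks broadly mirror the kernel's generic test from <linux/etherdevice.h>, which the set_mac path above already relies on:

static inline int is_valid_ether_addr(const u8 *addr)
{
	/* not the all-zero address and not a multicast/broadcast address */
	return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
}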
@@ -2650,58 +3867,80 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        adapter->watchdog_timer.data = (unsigned long)adapter;
 
        INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
+       INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
 
-       /* initialize default flow control settings */
-       hw->fc.original_type = ixgbe_fc_full;
-       hw->fc.type = ixgbe_fc_full;
-       hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
-       hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
-       hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
-
-       /* Interrupt Throttle Rate */
-       adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
-       adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
+       err = ixgbe_init_interrupt_scheme(adapter);
+       if (err)
+               goto err_sw_init;
 
        /* print bus type/speed/width info */
        pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
        link_speed = link_status & IXGBE_PCI_LINK_SPEED;
        link_width = link_status & IXGBE_PCI_LINK_WIDTH;
        dev_info(&pdev->dev, "(PCI Express:%s:%s) "
-                "%02x:%02x:%02x:%02x:%02x:%02x\n",
-               ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
-                (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
-                "Unknown"),
-               ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
-                (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
-                (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
-                (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
-                "Unknown"),
-               netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-               netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
-       ixgbe_read_part_num(hw, &part_num);
+                "%02x:%02x:%02x:%02x:%02x:%02x\n",
+               ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
+                (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
+                "Unknown"),
+               ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
+                (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
+                (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
+                (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
+                "Unknown"),
+               netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+               netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+       ixgbe_read_pba_num_generic(hw, &part_num);
        dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
-                hw->mac.type, hw->phy.type,
-                (part_num >> 8), (part_num & 0xff));
+                hw->mac.type, hw->phy.type,
+                (part_num >> 8), (part_num & 0xff));
+
+       if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
+               dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
+                        "this card is not sufficient for optimal "
+                        "performance.\n");
+               dev_warn(&pdev->dev, "For optimal performance a x8 "
+                        "PCI-Express slot is required.\n");
+       }
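
The "<= IXGBE_PCI_LINK_WIDTH_4" comparison works because the width constants follow the PCIe Link Status encoding (negotiated width in bits 9:4), so they order naturally. Assumed values, for illustration only:

#define IXGBE_PCI_LINK_WIDTH_1 0x10   /* 1 << 4 */
#define IXGBE_PCI_LINK_WIDTH_2 0x20   /* 2 << 4 */
#define IXGBE_PCI_LINK_WIDTH_4 0x40   /* 4 << 4 */
#define IXGBE_PCI_LINK_WIDTH_8 0x80   /* 8 << 4 */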
 
        /* reset the hardware with the new settings */
-       ixgbe_start_hw(hw);
+       hw->mac.ops.start_hw(hw);
+
+       /* link_config depends on start_hw being called at least once */
+       err = ixgbe_link_config(hw);
+       if (err) {
+               dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
+               goto err_register;
+       }
 
        netif_carrier_off(netdev);
-       netif_stop_queue(netdev);
+       netif_tx_stop_all_queues(netdev);
+
+       ixgbe_napi_add_all(adapter);
 
        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;
 
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       if (dca_add_requester(&pdev->dev) == 0) {
+               adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+               /* always use CB2 mode, difference is masked
+                * in the CB driver */
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
+               ixgbe_setup_dca(adapter);
+       }
+#endif
 
        dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
        cards_found++;
        return 0;
 
 err_register:
+       ixgbe_release_hw_control(adapter);
 err_hw_init:
 err_sw_init:
+       ixgbe_reset_interrupt_capability(adapter);
 err_eeprom:
        iounmap(hw->hw_addr);
 err_ioremap:
@@ -2733,14 +3972,28 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 
        flush_scheduled_work();
 
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+               adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+               dca_remove_requester(&pdev->dev);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+       }
+
+#endif
        unregister_netdev(netdev);
 
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
+       ixgbe_reset_interrupt_capability(adapter);
+
+       ixgbe_release_hw_control(adapter);
 
        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);
 
+       DPRINTK(PROBE, INFO, "complete\n");
+       ixgbe_napi_del_all(adapter);
+       kfree(adapter->tx_ring);
+       kfree(adapter->rx_ring);
+
        free_netdev(netdev);
 
        pci_disable_device(pdev);
@@ -2755,7 +4008,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
  * this device has been detected.
  */
 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
-                                               pci_channel_state_t state)
+                                                pci_channel_state_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev->priv;
@@ -2766,7 +4019,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                ixgbe_down(adapter);
        pci_disable_device(pdev);
 
-       /* Request a slot slot reset. */
+       /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -2783,10 +4036,11 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 
        if (pci_enable_device(pdev)) {
                DPRINTK(PROBE, ERR,
-                       "Cannot re-enable PCI device after reset.\n");
+                       "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
+       pci_restore_state(pdev);
 
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -2816,7 +4070,6 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
        }
 
        netif_device_attach(netdev);
-
 }
 
 static struct pci_error_handlers ixgbe_err_handler = {
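The initializer is elided by the hunk boundary; it presumably wires up the three callbacks above:

	.error_detected = ixgbe_io_error_detected,
	.slot_reset     = ixgbe_io_slot_reset,
	.resume         = ixgbe_io_resume,
};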
@@ -2852,9 +4105,14 @@ static int __init ixgbe_init_module(void)
 
        printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
 
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       dca_register_notify(&dca_notifier);
+
+#endif
        ret = pci_register_driver(&ixgbe_driver);
        return ret;
 }
+
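pci_register_driver() hands the PCI core the ixgbe_driver descriptor declared earlier in the file, outside these hunks. Its likely shape, reduced to the entry points seen in this patch:

static struct pci_driver ixgbe_driver = {
	.name        = ixgbe_driver_name,
	.id_table    = ixgbe_pci_tbl,
	.probe       = ixgbe_probe,
	.remove      = __devexit_p(ixgbe_remove),
	.err_handler = &ixgbe_err_handler
};
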
 module_init(ixgbe_init_module);
 
 /**
@@ -2865,8 +4123,25 @@ module_init(ixgbe_init_module);
  **/
 static void __exit ixgbe_exit_module(void)
 {
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+       dca_unregister_notify(&dca_notifier);
+#endif
        pci_unregister_driver(&ixgbe_driver);
 }
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
+                            void *p)
+{
+       int ret_val;
+
+       ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
+                                        __ixgbe_notify_dca);
+
+       return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+}
+#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
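
__ixgbe_notify_dca() is the per-device callback run by driver_for_each_device(); its body is not part of these hunks. A plausible sketch, assuming it mirrors the add/remove logic in the probe and remove paths above:

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* always use CB2 mode, difference is masked
		 * in the CB driver */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
		if (dca_add_requester(dev) == 0) {
			ixgbe_setup_dca(adapter);
			break;
		}
		/* fall through: provider rejected us, treat as remove */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}
	return 0;
}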
+
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */