Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e1efa1d..6369852 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
+#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                               "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "2.0.24-k2"
+#define DRV_VERSION "2.0.44-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
@@ -65,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  *   Class, Class Mask, private data (not used) }
  */
-static struct pci_device_id ixgbe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -74,6 +77,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
         board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
+        board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
@@ -92,8 +97,18 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
         board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
+        board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
         board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
+        board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
+        board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
+        board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
+        board_82599 },
 
        /* required last entry */
        {0, }
@@ -110,6 +125,13 @@ static struct notifier_block dca_notifier = {
 };
 #endif
 
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+                 "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
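As a sketch of how a parameter like max_vfs is typically consumed at probe time (the helper below is hypothetical and for illustration only; pci_enable_sriov() is the real API, and the 63-VF ceiling reflects the 82599's 64 pools, one of which the PF keeps):

#ifdef CONFIG_PCI_IOV
/* hypothetical probe-time consumer of max_vfs, illustration only */
static void ixgbe_enable_sriov_sketch(struct ixgbe_adapter *adapter)
{
        int nr_vfs = min_t(unsigned int, max_vfs, 63);

        if (nr_vfs && !pci_enable_sriov(adapter->pdev, nr_vfs)) {
                adapter->num_vfs = nr_vfs;
                adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
        }
}
#endif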
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
@@ -117,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 gcr;
+       u32 gpie;
+       u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+       /* disable iov and allow time for transactions to clear */
+       pci_disable_sriov(adapter->pdev);
+#endif
+
+       /* turn off device IOV mode */
+       gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+       gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+       IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+       gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+       gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+       IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+       /* set default pool back to 0 */
+       vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+       vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+       IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+       /* take a breather then clean up driver data */
+       msleep(100);
+       kfree(adapter->vfinfo);
+       adapter->vfinfo = NULL;
+
+       adapter->num_vfs = 0;
+       adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 {
        u32 ctrl_ext;
@@ -186,14 +243,40 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
        }
 }
 
+static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+                                          u64 qmask)
+{
+       u32 mask;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+       } else {
+               mask = (qmask & 0xFFFFFFFF);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+               mask = (qmask >> 32);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+       }
+}
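The old code kept a 64-bit v_idx mask on each ring and picked one of the EICS registers by hand; the helper above now derives both register halves from a single qmask. A standalone arithmetic sketch (not driver code):

        u64 qmask = (u64)1 << 40;           /* queue tied to MSI-X vector 40 */
        u32 lo = (u32)(qmask & 0xFFFFFFFF); /* 0x00000000 -> EICS_EX(0) */
        u32 hi = (u32)(qmask >> 32);        /* 0x00000100 -> EICS_EX(1) */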
+
 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                              struct ixgbe_tx_buffer
                                              *tx_buffer_info)
 {
-       tx_buffer_info->dma = 0;
+       if (tx_buffer_info->dma) {
+               if (tx_buffer_info->mapped_as_page)
+                       pci_unmap_page(adapter->pdev,
+                                      tx_buffer_info->dma,
+                                      tx_buffer_info->length,
+                                      PCI_DMA_TODEVICE);
+               else
+                       pci_unmap_single(adapter->pdev,
+                                        tx_buffer_info->dma,
+                                        tx_buffer_info->length,
+                                        PCI_DMA_TODEVICE);
+               tx_buffer_info->dma = 0;
+       }
        if (tx_buffer_info->skb) {
-               skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
-                             DMA_TO_DEVICE);
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
@@ -201,6 +284,61 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
        /* tx_buffer_info must be completely set up in the transmit path */
 }
 
+/**
+ * ixgbe_tx_is_paused - check if the tx ring is paused
+ * @adapter: the ixgbe adapter
+ * @tx_ring: the corresponding tx_ring
+ *
+ * If not in DCB mode, checks TFCS.TXOFF; otherwise, finds the TC that
+ * this tx_ring belongs to and checks the corresponding TXOFF bit in TFCS.
+ *
+ * Returns: true if paused
+ */
+static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
+                                      struct ixgbe_ring *tx_ring)
+{
+       u32 txoff = IXGBE_TFCS_TXOFF;
+
+#ifdef CONFIG_IXGBE_DCB
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+               int tc;
+               int reg_idx = tx_ring->reg_idx;
+               int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+
+               switch (adapter->hw.mac.type) {
+               case ixgbe_mac_82598EB:
+                       tc = reg_idx >> 2;
+                       txoff = IXGBE_TFCS_TXOFF0;
+                       break;
+               case ixgbe_mac_82599EB:
+                       tc = 0;
+                       txoff = IXGBE_TFCS_TXOFF;
+                       if (dcb_i == 8) {
+                               /* TC0, TC1 */
+                               tc = reg_idx >> 5;
+                               if (tc == 2) /* TC2, TC3 */
+                                       tc += (reg_idx - 64) >> 4;
+                               else if (tc == 3) /* TC4, TC5, TC6, TC7 */
+                                       tc += 1 + ((reg_idx - 96) >> 3);
+                       } else if (dcb_i == 4) {
+                               /* TC0, TC1 */
+                               tc = reg_idx >> 6;
+                               if (tc == 1) {
+                                       tc += (reg_idx - 64) >> 5;
+                                       if (tc == 2) /* TC2, TC3 */
+                                               tc += (reg_idx - 96) >> 4;
+                               }
+                       }
+                       break;
+               default:
+                       tc = 0;
+               }
+               txoff <<= tc;
+       }
+#endif
+       return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
+}
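The shifts above encode the 82599 queue-index-to-traffic-class layout. A standalone restatement of the 8-TC case, with the resulting ranges spelled out (illustration only, not driver code):

        /*
         * reg_idx   0..31  -> TC0        reg_idx  32..63  -> TC1
         * reg_idx  64..79  -> TC2        reg_idx  80..95  -> TC3
         * reg_idx  96..127 -> TC4..TC7 (eight queues per TC)
         */
        static int reg_idx_to_tc_8tc(int reg_idx)
        {
                int tc = reg_idx >> 5;

                if (tc == 2)                    /* 16-queue TCs */
                        tc += (reg_idx - 64) >> 4;
                else if (tc == 3)               /* 8-queue TCs */
                        tc += 1 + ((reg_idx - 96) >> 3);
                return tc;
        }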
+
 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                        struct ixgbe_ring *tx_ring,
                                        unsigned int eop)
@@ -212,7 +350,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
        adapter->detect_tx_hung = false;
        if (tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
-           !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
+           !ixgbe_tx_is_paused(adapter, tx_ring)) {
                /* detected Tx unit hang */
                union ixgbe_adv_tx_desc *tx_desc;
                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -248,14 +386,13 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
 
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
- *
- * returns true if transmit work is done
  **/
-static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
+static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                                struct ixgbe_ring *tx_ring)
 {
+       struct ixgbe_adapter *adapter = q_vector->adapter;
        struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
@@ -278,12 +415,24 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 
                        if (cleaned && skb) {
                                unsigned int segs, bytecount;
+                               unsigned int hlen = skb_headlen(skb);
 
                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
+#ifdef IXGBE_FCOE
+                               /* adjust for FCoE Sequence Offload */
+                               if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+                                   && (skb->protocol == htons(ETH_P_FCOE)) &&
+                                   skb_is_gso(skb)) {
+                                       hlen = skb_transport_offset(skb) +
+                                               sizeof(struct fc_frame_header) +
+                                               sizeof(struct fcoe_crc_eof);
+                                       segs = DIV_ROUND_UP(skb->len - hlen,
+                                               skb_shinfo(skb)->gso_size);
+                               }
+#endif /* IXGBE_FCOE */
                                /* multiply data chunks by size of headers */
-                               bytecount = ((segs - 1) * skb_headlen(skb)) +
-                                           skb->len;
+                               bytecount = ((segs - 1) * hlen) + skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }
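To make the FCoE segment estimate above concrete, a worked example with illustrative numbers (a 9018-byte jumbo skb, roughly 58 bytes of FC/FCoE header and trailer overhead, and the 2112-byte FC max payload as gso_size):

        unsigned int segs = DIV_ROUND_UP(9018 - 58, 2112);  /* = 5 */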
@@ -314,7 +463,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
-                       ++adapter->restart_queue;
+                       ++tx_ring->restart_queue;
                }
        }
 
@@ -329,25 +478,13 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
        }
 
        /* re-arm the interrupt */
-       if (count >= tx_ring->work_limit) {
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
-                                       tx_ring->v_idx);
-               else if (tx_ring->v_idx & 0xFFFFFFFF)
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0),
-                                       tx_ring->v_idx);
-               else
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1),
-                                       (tx_ring->v_idx >> 32));
-       }
-
+       if (count >= tx_ring->work_limit)
+               ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
 
        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        tx_ring->stats.packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
-       adapter->net_stats.tx_bytes += total_bytes;
-       adapter->net_stats.tx_packets += total_packets;
        return (count < tx_ring->work_limit);
 }
 
@@ -386,19 +523,23 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
        u32 txctrl;
        int cpu = get_cpu();
        int q = tx_ring - adapter->tx_ring;
+       struct ixgbe_hw *hw = &adapter->hw;
 
        if (tx_ring->cpu != cpu) {
-               txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                        txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+                       txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
                        txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-                                  IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+                                 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+                       txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
                }
-               txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
                tx_ring->cpu = cpu;
        }
        put_cpu();
@@ -474,12 +615,12 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
 
        skb_record_rx_queue(skb, ring->queue_index);
        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
-               if (adapter->vlgrp && is_vlan && (tag != 0))
+               if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
                        vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
                else
                        napi_gro_receive(napi, skb);
        } else {
-               if (adapter->vlgrp && is_vlan && (tag != 0))
+               if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
                        vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                else
                        netif_rx(skb);
@@ -493,8 +634,11 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
  * @skb: skb currently being received and modified
  **/
 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
-                                     u32 status_err, struct sk_buff *skb)
+                                    union ixgbe_adv_rx_desc *rx_desc,
+                                    struct sk_buff *skb)
 {
+       u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
+
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Rx csum disabled */
@@ -512,13 +656,22 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                return;
 
        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+               u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+               /*
+                * 82599 errata, UDP frames with a 0 checksum can be marked as
+                * checksum errors.
+                */
+               if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
+                   (adapter->hw.mac.type == ixgbe_mac_82599EB))
+                       return;
+
                adapter->hw_csum_rx_error++;
                return;
        }
 
        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
-       adapter->hw_csum_rx_good++;
 }
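For background on the errata test above: a zero UDP checksum over IPv4 means the sender did not compute one at all (RFC 768), so such frames are perfectly valid even though the 82599 may flag them as TCPE errors; returning early keeps them out of hw_csum_rx_error while leaving ip_summed as CHECKSUM_NONE.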
 
 static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
@@ -546,7 +699,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        unsigned int i;
-       unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
 
        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];
@@ -555,7 +707,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
 
                if (!bi->page_dma &&
-                   (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+                   (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
                        if (!bi->page) {
                                bi->page = alloc_page(GFP_ATOMIC);
                                if (!bi->page) {
@@ -576,6 +728,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 
                if (!bi->skb) {
                        struct sk_buff *skb;
+                       /* netdev_alloc_skb reserves 32 bytes up front!! */
+                       uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
                        skb = netdev_alloc_skb(adapter->netdev, bufsz);
 
                        if (!skb) {
@@ -583,20 +737,18 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                goto no_buffers;
                        }
 
-                       /*
-                        * Make buffer alignment 2 beyond a 16 byte boundary
-                        * this will result in a 16 byte aligned IP header after
-                        * the 14 byte MAC header is removed
-                        */
-                       skb_reserve(skb, NET_IP_ALIGN);
+                       /* advance the data pointer to the next cache line */
+                       skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
+                                         - skb->data));
 
                        bi->skb = skb;
-                       bi->dma = pci_map_single(pdev, skb->data, bufsz,
+                       bi->dma = pci_map_single(pdev, skb->data,
+                                                rx_ring->rx_buf_len,
                                                 PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
-               if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
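The PTR_ALIGN() reserve above pushes skb->data up to the next cache-line boundary instead of the old fixed NET_IP_ALIGN offset. A quick arithmetic sketch, assuming 64-byte cache lines:

        unsigned long data = 0x1010;              /* example skb->data */
        unsigned long aligned = ALIGN(data, 64);  /* 0x1040 */
        unsigned long pad = aligned - data;       /* 48 bytes reserved */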
@@ -639,12 +791,14 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
 /**
  * ixgbe_transform_rsc_queue - change rsc queue into a full packet
  * @skb: pointer to the last skb in the rsc queue
+ * @count: pointer to number of packets coalesced in this context
  *
  * This function changes a queue full of hw rsc buffers into a completed
  * packet.  It uses the ->prev pointers to find the first packet and then
  * turns it into the frag list owner.
  **/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
+                                                        u64 *count)
 {
        unsigned int frag_list_size = 0;
 
@@ -653,6 +807,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
                frag_list_size += skb->len;
                skb->prev = NULL;
                skb = prev;
+               *count += 1;
        }
 
        skb_shinfo(skb)->frag_list = skb->next;
@@ -668,6 +823,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                int *work_done, int work_to_do)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
+       struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -678,6 +834,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+#ifdef IXGBE_FCOE
+       int ddp_bytes = 0;
+#endif /* IXGBE_FCOE */
 
        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
@@ -690,12 +849,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        break;
                (*work_done)++;
 
-               if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-                       if (hdr_info & IXGBE_RXDADV_SPH)
-                               adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
@@ -705,13 +862,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
                cleaned = true;
                skb = rx_buffer_info->skb;
-               prefetch(skb->data - NET_IP_ALIGN);
+               prefetch(skb->data);
                rx_buffer_info->skb = NULL;
 
                if (rx_buffer_info->dma) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         PCI_DMA_FROMDEVICE);
+                       rx_buffer_info->dma = 0;
                        skb_put(skb, len);
                }
 
@@ -743,25 +901,31 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                prefetch(next_rxd);
                cleaned_count++;
 
-               if (adapter->flags & IXGBE_FLAG_RSC_CAPABLE)
+               if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
                        rsc_count = ixgbe_get_rsc_count(rx_desc);
 
                if (rsc_count) {
                        u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                                     IXGBE_RXDADV_NEXTP_SHIFT;
                        next_buffer = &rx_ring->rx_buffer_info[nextp];
-                       rx_ring->rsc_count += (rsc_count - 1);
                } else {
                        next_buffer = &rx_ring->rx_buffer_info[i];
                }
 
                if (staterr & IXGBE_RXD_STAT_EOP) {
                        if (skb->prev)
-                               skb = ixgbe_transform_rsc_queue(skb);
+                               skb = ixgbe_transform_rsc_queue(skb,
+                                                       &(rx_ring->rsc_count));
+                       if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+                               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
+                                       rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
+                               else
+                                       rx_ring->rsc_count++;
+                               rx_ring->rsc_flush++;
+                       }
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
-                       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+                       if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
@@ -770,7 +934,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
-                       adapter->non_eop_descs++;
+                       rx_ring->non_eop_descs++;
                        goto next_desc;
                }
 
@@ -779,7 +943,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        goto next_desc;
                }
 
-               ixgbe_rx_checksum(adapter, staterr, skb);
+               ixgbe_rx_checksum(adapter, rx_desc, skb);
 
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
@@ -788,9 +952,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                skb->protocol = eth_type_trans(skb, adapter->netdev);
 #ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
-               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-                       if (!ixgbe_fcoe_ddp(adapter, rx_desc, skb))
+               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+                       ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+                       if (!ddp_bytes)
                                goto next_desc;
+               }
 #endif /* IXGBE_FCOE */
                ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
 
@@ -816,10 +982,25 @@ next_desc:
        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 
+#ifdef IXGBE_FCOE
+       /* include DDPed FCoE data */
+       if (ddp_bytes > 0) {
+               unsigned int mss;
+
+               mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+                       sizeof(struct fc_frame_header) -
+                       sizeof(struct fcoe_crc_eof);
+               if (mss > 512)
+                       mss &= ~511;
+               total_rx_bytes += ddp_bytes;
+               total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
+       }
+#endif /* IXGBE_FCOE */
+
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
-       adapter->net_stats.rx_bytes += total_rx_bytes;
-       adapter->net_stats.rx_packets += total_rx_packets;
+       netdev->stats.rx_bytes += total_rx_bytes;
+       netdev->stats.rx_packets += total_rx_packets;
 
        return cleaned;
 }
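A worked example of the DDP byte-to-packet estimate above, assuming the usual FC/FCoE sizes (14-byte FCoE header, 24-byte FC frame header, 8-byte CRC/EOF trailer) and an MTU of 2500:

        unsigned int mss = 2500 - 14 - 24 - 8;  /* = 2454 */
        mss &= ~511;                            /* > 512, so rounded down to 2048 */
        /* 1 MB of DDPed data then counts as
         * DIV_ROUND_UP(1048576, 2048) = 512 packets */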
@@ -868,19 +1049,14 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                                              r_idx + 1);
                }
 
-               /* if this is a tx only vector halve the interrupt rate */
                if (q_vector->txr_count && !q_vector->rxr_count)
-                       q_vector->eitr = (adapter->eitr_param >> 1);
+                       /* tx only */
+                       q_vector->eitr = adapter->tx_eitr_param;
                else if (q_vector->rxr_count)
-                       /* rx only */
-                       q_vector->eitr = adapter->eitr_param;
+                       /* rx or mixed */
+                       q_vector->eitr = adapter->rx_eitr_param;
 
-               /*
-                * since this is initial set up don't need to call
-                * ixgbe_write_eitr helper
-                */
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
-                               EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+               ixgbe_write_eitr(q_vector);
        }
 
        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -892,7 +1068,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
-       mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+       if (adapter->num_vfs)
+               mask &= ~(IXGBE_EIMS_OTHER |
+                         IXGBE_EIMS_MAILBOX |
+                         IXGBE_EIMS_LSC);
+       else
+               mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
 }
 
@@ -965,17 +1146,19 @@ update_itr_done:
 
 /**
  * ixgbe_write_eitr - write EITR register in hardware specific way
- * @adapter: pointer to adapter struct
- * @v_idx: vector index into q_vector array
- * @itr_reg: new value to be written in *register* format, not ints/s
+ * @q_vector: structure containing interrupt and ring information
  *
  * This function is made to be called by ethtool and by the driver
  * when it needs to update EITR registers at runtime.  Hardware
  * specific quirks/differences are taken care of here.
  */
-void ixgbe_write_eitr(struct ixgbe_adapter *adapter, int v_idx, u32 itr_reg)
+void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 {
+       struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
+       int v_idx = q_vector->v_idx;
+       u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
+
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                /* must write high and low 16 bits to reset counter */
                itr_reg |= (itr_reg << 16);
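A numeric sketch of the 82598 quirk above: the 16-bit interval must be mirrored into the upper half of EITR for the counter to reset, so a programmed value of 0x00C8 becomes

        u32 itr_reg = 0x00C8;
        itr_reg |= (itr_reg << 16);  /* register write is 0x00C800C8 */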
@@ -994,7 +1177,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
        struct ixgbe_adapter *adapter = q_vector->adapter;
        u32 new_itr;
        u8 current_itr, ret_itr;
-       int i, r_idx, v_idx = q_vector->v_idx;
+       int i, r_idx;
        struct ixgbe_ring *rx_ring, *tx_ring;
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
@@ -1044,14 +1227,13 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
        }
 
        if (new_itr != q_vector->eitr) {
-               u32 itr_reg;
+               /* do an exponential smoothing */
+               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 
-               /* save the algorithm value here, not the smoothed one */
+               /* save the smoothed value for the next pass */
                q_vector->eitr = new_itr;
-               /* do an exponential smoothing */
-               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-               itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
-               ixgbe_write_eitr(adapter, v_idx, itr_reg);
+
+               ixgbe_write_eitr(q_vector);
        }
 
        return;
@@ -1096,6 +1278,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
        adapter->link_check_timeout = jiffies;
        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
+               IXGBE_WRITE_FLUSH(hw);
                schedule_work(&adapter->watchdog_task);
        }
 }
@@ -1119,17 +1302,70 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);
 
+       if (eicr & IXGBE_EICR_MAILBOX)
+               ixgbe_msg_task(adapter);
+
        if (hw->mac.type == ixgbe_mac_82598EB)
                ixgbe_check_fan_failure(adapter, eicr);
 
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       if (hw->mac.type == ixgbe_mac_82599EB) {
                ixgbe_check_sfp_event(adapter, eicr);
+
+               /* Handle Flow Director Full threshold interrupt */
+               if (eicr & IXGBE_EICR_FLOW_DIR) {
+                       int i;
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
+                       /* Disable transmits before FDIR Re-initialization */
+                       netif_tx_stop_all_queues(netdev);
+                       for (i = 0; i < adapter->num_tx_queues; i++) {
+                               struct ixgbe_ring *tx_ring =
+                                                          &adapter->tx_ring[i];
+                               if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
+                                                      &tx_ring->reinit_state))
+                                       schedule_work(&adapter->fdir_reinit_task);
+                       }
+               }
+       }
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
        return IRQ_HANDLED;
 }
 
+static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
+                                          u64 qmask)
+{
+       u32 mask;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+       } else {
+               mask = (qmask & 0xFFFFFFFF);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+               mask = (qmask >> 32);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+       }
+       /* skip the flush */
+}
+
+static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
+                                            u64 qmask)
+{
+       u32 mask;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+       } else {
+               mask = (qmask & 0xFFFFFFFF);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+               mask = (qmask >> 32);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+       }
+       /* skip the flush */
+}
+
 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 {
        struct ixgbe_q_vector *q_vector = data;
@@ -1143,17 +1379,15 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
-#ifdef CONFIG_IXGBE_DCA
-               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_tx_dca(adapter, tx_ring);
-#endif
                tx_ring->total_bytes = 0;
                tx_ring->total_packets = 0;
-               ixgbe_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }
 
+       /* EIAM disabled interrupts (on this vector) for us */
+       napi_schedule(&q_vector->napi);
+
        return IRQ_HANDLED;
 }
 
@@ -1182,16 +1416,8 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
        if (!q_vector->rxr_count)
                return IRQ_HANDLED;
 
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       rx_ring = &(adapter->rx_ring[r_idx]);
        /* disable interrupts on this vector only */
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
-       else if (rx_ring->v_idx & 0xFFFFFFFF)
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), rx_ring->v_idx);
-       else
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1),
-                               (rx_ring->v_idx >> 32));
+       /* EIAM disabled interrupts (on this vector) for us */
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
@@ -1199,27 +1425,37 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 
 static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 {
-       ixgbe_msix_clean_rx(irq, data);
-       ixgbe_msix_clean_tx(irq, data);
+       struct ixgbe_q_vector *q_vector = data;
+       struct ixgbe_adapter  *adapter = q_vector->adapter;
+       struct ixgbe_ring  *ring;
+       int r_idx;
+       int i;
 
-       return IRQ_HANDLED;
-}
+       if (!q_vector->txr_count && !q_vector->rxr_count)
+               return IRQ_HANDLED;
 
-static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
-                                          u64 qmask)
-{
-       u32 mask;
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       for (i = 0; i < q_vector->txr_count; i++) {
+               ring = &(adapter->tx_ring[r_idx]);
+               ring->total_bytes = 0;
+               ring->total_packets = 0;
+               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+                                     r_idx + 1);
+       }
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       } else {
-               mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
-               mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       for (i = 0; i < q_vector->rxr_count; i++) {
+               ring = &(adapter->rx_ring[r_idx]);
+               ring->total_bytes = 0;
+               ring->total_packets = 0;
+               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+                                     r_idx + 1);
        }
-       /* skip the flush */
+
+       /* EIAM disabled interrupts (on this vector) for us */
+       napi_schedule(&q_vector->napi);
+
+       return IRQ_HANDLED;
 }
 
 /**
@@ -1251,32 +1487,45 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
-               if (adapter->itr_setting & 1)
+               if (adapter->rx_itr_setting & 1)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter, rx_ring->v_idx);
+                       ixgbe_irq_enable_queues(adapter,
+                                               ((u64)1 << q_vector->v_idx));
        }
 
        return work_done;
 }
 
 /**
- * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
  *
- * This function will clean more than one rx queue associated with a
- * q_vector.
+ * This function will clean all the rx and tx queues associated with a
+ * q_vector.
  **/
-static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
+static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 {
        struct ixgbe_q_vector *q_vector =
                               container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct ixgbe_ring *rx_ring = NULL;
+       struct ixgbe_ring *ring = NULL;
        int work_done = 0, i;
        long r_idx;
-       u64 enable_mask = 0;
+       bool tx_clean_complete = true;
+
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       for (i = 0; i < q_vector->txr_count; i++) {
+               ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+                       ixgbe_update_tx_dca(adapter, ring);
+#endif
+               tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
+               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+                                     r_idx + 1);
+       }
 
        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
@@ -1284,31 +1533,71 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
        budget = max(budget, 1);
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
-               rx_ring = &(adapter->rx_ring[r_idx]);
+               ring = &(adapter->rx_ring[r_idx]);
 #ifdef CONFIG_IXGBE_DCA
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_rx_dca(adapter, rx_ring);
+                       ixgbe_update_rx_dca(adapter, ring);
 #endif
-               ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-               enable_mask |= rx_ring->v_idx;
+               ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }
 
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       rx_ring = &(adapter->rx_ring[r_idx]);
+       ring = &(adapter->rx_ring[r_idx]);
        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
-               if (adapter->itr_setting & 1)
+               if (adapter->rx_itr_setting & 1)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter, enable_mask);
+                       ixgbe_irq_enable_queues(adapter,
+                                               ((u64)1 << q_vector->v_idx));
                return 0;
        }
 
        return work_done;
 }
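The division that implements the fair split sits between the two hunks above, but the effect is easy to illustrate: with a NAPI budget of 64 and three rx rings on this vector, each ring may clean at most

        max(64 / 3, 1) == 21

packets before the vector yields back to NAPI.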
+
+/**
+ * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
+{
+       struct ixgbe_q_vector *q_vector =
+                              container_of(napi, struct ixgbe_q_vector, napi);
+       struct ixgbe_adapter *adapter = q_vector->adapter;
+       struct ixgbe_ring *tx_ring = NULL;
+       int work_done = 0;
+       long r_idx;
+
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       tx_ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+               ixgbe_update_tx_dca(adapter, tx_ring);
+#endif
+
+       if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
+               work_done = budget;
+
+       /* If all Tx work done, exit the polling mode */
+       if (work_done < budget) {
+               napi_complete(napi);
+               if (adapter->tx_itr_setting & 1)
+                       ixgbe_set_itr_msix(q_vector);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+       }
+
+       return work_done;
+}
+
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                      int r_idx)
 {
@@ -1316,7 +1605,6 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
 
        set_bit(r_idx, q_vector->rxr_idx);
        q_vector->rxr_count++;
-       a->rx_ring[r_idx].v_idx = (u64)1 << v_idx;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
@@ -1326,7 +1614,6 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
 
        set_bit(t_idx, q_vector->txr_idx);
        q_vector->txr_count++;
-       a->tx_ring[t_idx].v_idx = (u64)1 << v_idx;
 }
 
 /**
@@ -1449,7 +1736,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
        sprintf(adapter->name[vector], "%s:lsc", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
-                         &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+                         ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
        if (err) {
                DPRINTK(PROBE, ERR,
                        "request_irq for msix_lsc failed: %d\n", err);
@@ -1505,14 +1792,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
        }
 
        if (new_itr != q_vector->eitr) {
-               u32 itr_reg;
+               /* do an exponential smoothing */
+               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 
-               /* save the algorithm value here, not the smoothed one */
+               /* save the smoothed value for the next pass */
                q_vector->eitr = new_itr;
-               /* do an exponential smoothing */
-               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-               itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
-               ixgbe_write_eitr(adapter, 0, itr_reg);
+
+               ixgbe_write_eitr(q_vector);
        }
 
        return;
@@ -1533,11 +1819,21 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
                mask |= IXGBE_EIMS_ECC;
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
+               if (adapter->num_vfs)
+                       mask |= IXGBE_EIMS_MAILBOX;
        }
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+               mask |= IXGBE_EIMS_FLOW_DIR;
 
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
        ixgbe_irq_enable_queues(adapter, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
+
+       if (adapter->num_vfs > 32) {
+               u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+       }
 }
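Worked once for clarity, the EITRSEL value above: with 40 VFs, eitrsel = (1 << (40 - 32)) - 1 = 0x00FF, a mask over the eight vectors beyond the first 32 (the exact routing semantics belong to the hardware; only the arithmetic is shown here).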
 
 /**
@@ -1618,10 +1914,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                err = ixgbe_request_msix_irqs(adapter);
        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
-               err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
+               err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
                                  netdev->name, netdev);
        } else {
-               err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
+               err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
                                  netdev->name, netdev);
        }
 
@@ -1667,6 +1963,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+               if (adapter->num_vfs > 32)
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1687,7 +1985,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
 
        IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
-                       EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
+                       EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
 
        ixgbe_set_ivar(adapter, 0, 0, 0);
        ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -1724,63 +2022,81 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
                adapter->tx_ring[i].head = IXGBE_TDH(j);
                adapter->tx_ring[i].tail = IXGBE_TDT(j);
-               /* Disable Tx Head Writeback RO bit, since this hoses
+               /*
+                * Disable Tx Head Writeback RO bit, since this hoses
                 * bookkeeping if things aren't delivered in order.
                 */
-               txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+                       break;
+               case ixgbe_mac_82599EB:
+               default:
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
+                       break;
+               }
                txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+                       break;
+               case ixgbe_mac_82599EB:
+               default:
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
+                       break;
+               }
        }
+
        if (hw->mac.type == ixgbe_mac_82599EB) {
-               /* We enable 8 traffic classes, DCB only */
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-                       IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
-                                       IXGBE_MTQC_8TC_8TQ));
+               u32 rttdcs;
+               u32 mask;
+
+               /* disable the arbiter while setting MTQC */
+               rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+               rttdcs |= IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+               /* set transmit pool layout */
+               mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+               switch (adapter->flags & mask) {
+
+               case (IXGBE_FLAG_SRIOV_ENABLED):
+                       IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+                                       (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+                       break;
+
+               case (IXGBE_FLAG_DCB_ENABLED):
+                       /* We enable 8 traffic classes, DCB only */
+                       IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+                                     (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+                       break;
+
+               default:
+                       IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+                       break;
+               }
+
+               /* re-enable the arbiter */
+               rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        }
 }
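The RTTDCS writes above follow the usual quiesce/reprogram/resume bracket for registers the Tx arbiter consumes; restated generically (a sketch, not driver code):

        u32 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);

        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs | IXGBE_RTTDCS_ARBDIS);
        /* ... rewrite IXGBE_MTQC while the arbiter is stopped ... */
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs & ~IXGBE_RTTDCS_ARBDIS);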
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
 
-static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
+static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
+                                   struct ixgbe_ring *rx_ring)
 {
-       struct ixgbe_ring *rx_ring;
        u32 srrctl;
-       int queue0 = 0;
-       unsigned long mask;
+       int index;
        struct ixgbe_ring_feature *feature = adapter->ring_feature;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       int dcb_i = feature[RING_F_DCB].indices;
-                       if (dcb_i == 8)
-                               queue0 = index >> 4;
-                       else if (dcb_i == 4)
-                               queue0 = index >> 5;
-                       else
-                               dev_err(&adapter->pdev->dev, "Invalid DCB "
-                                       "configuration\n");
-#ifdef IXGBE_FCOE
-                       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
-                               struct ixgbe_ring_feature *f;
-
-                               rx_ring = &adapter->rx_ring[queue0];
-                               f = &adapter->ring_feature[RING_F_FCOE];
-                               if ((queue0 == 0) && (index > rx_ring->reg_idx))
-                                       queue0 = f->mask + index -
-                                                rx_ring->reg_idx - 1;
-                       }
-#endif /* IXGBE_FCOE */
-               } else {
-                       queue0 = index;
-               }
-       } else {
+       index = rx_ring->reg_idx;
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               unsigned long mask;
                mask = (unsigned long) feature[RING_F_RSS].mask;
-               queue0 = index & mask;
                index = index & mask;
        }
-
-       rx_ring = &adapter->rx_ring[queue0];
-
        srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
 
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -1789,7 +2105,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
        srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                  IXGBE_SRRCTL_BSIZEHDR_MASK;
 
-       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+       if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
                srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
@@ -1817,12 +2133,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_IXGBE_DCB
                                 | IXGBE_FLAG_DCB_ENABLED
 #endif
+                                | IXGBE_FLAG_SRIOV_ENABLED
                                );
 
        switch (mask) {
        case (IXGBE_FLAG_RSS_ENABLED):
                mrqc = IXGBE_MRQC_RSSEN;
                break;
+       case (IXGBE_FLAG_SRIOV_ENABLED):
+               mrqc = IXGBE_MRQC_VMDQEN;
+               break;
 #ifdef CONFIG_IXGBE_DCB
        case (IXGBE_FLAG_DCB_ENABLED):
                mrqc = IXGBE_MRQC_RT8TCEN;
@@ -1836,6 +2156,50 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 }
 
 /**
+ * ixgbe_configure_rscctl - enable RSC for the indicated ring
+ * @adapter:    address of board private structure
+ * @index:      index of ring to set
+ **/
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
+{
+       struct ixgbe_ring *rx_ring;
+       struct ixgbe_hw *hw = &adapter->hw;
+       int j;
+       u32 rscctrl;
+       int rx_buf_len;
+
+       rx_ring = &adapter->rx_ring[index];
+       j = rx_ring->reg_idx;
+       rx_buf_len = rx_ring->rx_buf_len;
+       rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
+       rscctrl |= IXGBE_RSCCTL_RSCEN;
+       /*
+        * we must limit the number of descriptors so that the
+        * total size of max desc * buf_len is not greater
+        * than 65535
+        */
+       if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+#if (MAX_SKB_FRAGS > 16)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (MAX_SKB_FRAGS > 8)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#elif (MAX_SKB_FRAGS > 4)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#else
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#endif
+       } else {
+               if (rx_buf_len < IXGBE_RXBUFFER_4096)
+                       rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+               else if (rx_buf_len < IXGBE_RXBUFFER_8192)
+                       rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+               else
+                       rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
+}
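Checking the MAXDESC arithmetic above: RSC may not aggregate more than 65535 bytes, so in non-packet-split mode a sub-4K buffer (say 3K) can safely take 16 descriptors (16 * 3072 = 49152), a sub-8K buffer takes 8, and anything larger takes 4. In packet-split mode each aggregated buffer lands in its own frag slot, so MAX_SKB_FRAGS caps the descriptor count instead.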
+
+/**
  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
  * @adapter: board private structure
  *
@@ -1845,6 +2209,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 {
        u64 rdba;
        struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_ring *rx_ring;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int i, j;
@@ -1855,16 +2220,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        u32 fctrl, hlreg0;
        u32 reta = 0, mrqc = 0;
        u32 rdrxctl;
-       u32 rscctrl;
        int rx_buf_len;
 
        /* Decide whether to use packet split mode or not */
-       adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-
-#ifdef IXGBE_FCOE
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-               adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
-#endif /* IXGBE_FCOE */
+       /* Do not use packet split if we're in SR-IOV Mode */
+       if (!adapter->num_vfs)
+               adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -1876,10 +2237,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                                      IXGBE_PSRTYPE_IPV4HDR |
                                      IXGBE_PSRTYPE_IPV6HDR |
                                      IXGBE_PSRTYPE_L2HDR;
-                       IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+                       IXGBE_WRITE_REG(hw,
+                                       IXGBE_PSRTYPE(adapter->num_vfs),
+                                       psrtype);
                }
        } else {
-               if (!(adapter->flags & IXGBE_FLAG_RSC_ENABLED) &&
+               if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
                    (netdev->mtu <= ETH_DATA_LEN))
                        rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                else
@@ -1898,7 +2261,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        else
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 #ifdef IXGBE_FCOE
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+       if (netdev->features & NETIF_F_FCOE_MTU)
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 #endif
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -1913,29 +2276,37 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
         * the Base and Length of the Rx Descriptor Ring
         */
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               rdba = adapter->rx_ring[i].dma;
-               j = adapter->rx_ring[i].reg_idx;
+               rx_ring = &adapter->rx_ring[i];
+               rdba = rx_ring->dma;
+               j = rx_ring->reg_idx;
                IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
                IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
-               adapter->rx_ring[i].head = IXGBE_RDH(j);
-               adapter->rx_ring[i].tail = IXGBE_RDT(j);
-               adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+               rx_ring->head = IXGBE_RDH(j);
+               rx_ring->tail = IXGBE_RDT(j);
+               rx_ring->rx_buf_len = rx_buf_len;
+
+               if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
+                       rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+               else
+                       rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
 
 #ifdef IXGBE_FCOE
-               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+               if (netdev->features & NETIF_F_FCOE_MTU) {
                        struct ixgbe_ring_feature *f;
                        f = &adapter->ring_feature[RING_F_FCOE];
-                       if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
-                           (i >= f->mask) && (i < f->mask + f->indices))
-                               adapter->rx_ring[i].rx_buf_len =
-                                       IXGBE_FCOE_JUMBO_FRAME_SIZE;
+                       if ((i >= f->mask) && (i < f->mask + f->indices)) {
+                               rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+                               if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
+                                       rx_ring->rx_buf_len =
+                                               IXGBE_FCOE_JUMBO_FRAME_SIZE;
+                       }
                }
 
 #endif /* IXGBE_FCOE */
-               ixgbe_configure_srrctl(adapter, j);
+               ixgbe_configure_srrctl(adapter, rx_ring);
        }
 
        if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -1954,6 +2325,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
        }
 
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               u32 vt_reg_bits;
+               u32 reg_offset, vf_shift;
+               u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+               vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+                       | IXGBE_VT_CTL_REPLEN;
+               vt_reg_bits |= (adapter->num_vfs <<
+                               IXGBE_VT_CTL_POOL_SHIFT);
+               IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+               vf_shift = adapter->num_vfs % 32;
+               reg_offset = adapter->num_vfs / 32;
+               IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+               /* Enable only the PF's pool for Tx/Rx */
+               IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+               IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+               IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+               ixgbe_set_vmolr(hw, adapter->num_vfs);
+       }
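
The vf_shift/reg_offset math locates a pool's enable bit inside the paired
32-bit VFRE/VFTE registers: pool n lives in register word n / 32, at bit
n % 32. A tiny standalone illustration:

#include <stdio.h>

static void show_pf_pool_bit(unsigned int num_vfs)
{
	unsigned int reg_offset = num_vfs / 32; /* which VFRE/VFTE word */
	unsigned int vf_shift = num_vfs % 32;   /* bit inside that word */

	printf("PF pool %u -> VFRE(%u)/VFTE(%u) |= 1 << %u\n",
	       num_vfs, reg_offset, reg_offset, vf_shift);
}

With 48 VFs, for example, the PF owns pool 48, so bit 16 of VFRE(1) and
VFTE(1) is the one set.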
+
        /* Program MRQC for the distribution of queues */
        mrqc = ixgbe_setup_mrqc(adapter);
 
@@ -1985,6 +2380,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        }
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
+       if (adapter->num_vfs) {
+               u32 reg;
+
+               /* Map PF MAC address in RAR Entry 0 to first pool
+                * following VFs */
+               hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+               /* Set up VF register offsets for selected VT Mode, i.e.
+                * 64 VFs for SR-IOV */
+               reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+               reg |= IXGBE_GCR_EXT_SRIOV;
+               IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+       }
+
        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2008,37 +2417,11 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
        }
 
-       if (adapter->flags & IXGBE_FLAG_RSC_ENABLED) {
+       if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                /* Enable 82599 HW-RSC */
-               for (i = 0; i < adapter->num_rx_queues; i++) {
-                       j = adapter->rx_ring[i].reg_idx;
-                       rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
-                       rscctrl |= IXGBE_RSCCTL_RSCEN;
-                       /*
-                        * we must limit the number of descriptors so that the
-                        * total size of max desc * buf_len is not greater
-                        * than 65535
-                        */
-                       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-#if (MAX_SKB_FRAGS > 16)
-                               rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (MAX_SKB_FRAGS > 8)
-                               rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (MAX_SKB_FRAGS > 4)
-                               rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#else
-                               rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
-#endif
-                       } else {
-                               if (rx_buf_len < IXGBE_RXBUFFER_4096)
-                                       rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-                               else if (rx_buf_len < IXGBE_RXBUFFER_8192)
-                                       rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-                               else
-                                       rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-                       }
-                       IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
-               }
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       ixgbe_configure_rscctl(adapter, i);
+
                /* Disable RSC for ACK packets */
                IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
                   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -2049,15 +2432,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       int pool_ndx = adapter->num_vfs;
 
        /* add VID to filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+       hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
 }
 
 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       int pool_ndx = adapter->num_vfs;
 
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_disable(adapter);
@@ -2068,7 +2453,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
                ixgbe_irq_enable(adapter);
 
        /* remove VID from filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+       hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
 }
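
Both paths now program the VLAN filter against pool index num_vfs instead of a
hard-coded 0: with SR-IOV active the VFs occupy pools 0 .. num_vfs - 1 and the
PF takes the next one (num_vfs is 0, and thus pool 0, when SR-IOV is off). A
trivial sketch of that assumed layout; 64 is the 82599 VT-mode pool maximum:

enum { MAX_POOLS = 64 };

static unsigned int pf_pool_index(unsigned int num_vfs)
{
	/* VFs fill the low pools; the PF sits immediately after them */
	return num_vfs < MAX_POOLS ? num_vfs : MAX_POOLS - 1;
}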
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2088,23 +2473,25 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
         * not in DCB mode.
         */
        ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+
+       /* Disable CFI check */
+       ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+
+       /* enable VLAN tag stripping */
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
-               ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+               ctrl |= IXGBE_VLNCTRL_VME;
        } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               ctrl |= IXGBE_VLNCTRL_VFE;
-               /* enable VLAN tag insert/strip */
-               ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-               ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
                for (i = 0; i < adapter->num_rx_queues; i++) {
+                       u32 ctrl;
                        j = adapter->rx_ring[i].reg_idx;
                        ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
                        ctrl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
                }
        }
+
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+
        ixgbe_vlan_rx_add_vid(netdev, 0);
 
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2149,7 +2536,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
  * responsible for configuring the hardware for proper unicast, multicast and
  * promiscuous mode.
  **/
-static void ixgbe_set_rx_mode(struct net_device *netdev)
+void ixgbe_set_rx_mode(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -2181,7 +2568,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 
        /* reprogram secondary unicast list */
-       hw->mac.ops.update_uc_addr_list(hw, &netdev->uc_list);
+       hw->mac.ops.update_uc_addr_list(hw, netdev);
 
        /* reprogram multicast list */
        addr_count = netdev->mc_count;
@@ -2189,6 +2576,8 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
                addr_list = netdev->mc_list->dmi_addr;
        hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
                                        ixgbe_addr_list_itr);
+       if (adapter->num_vfs)
+               ixgbe_restore_vf_multicasts(adapter);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2204,12 +2593,15 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;
                q_vector = adapter->q_vector[q_idx];
-               if (!q_vector->rxr_count)
-                       continue;
                napi = &q_vector->napi;
-               if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
-                   (q_vector->rxr_count > 1))
-                       napi->poll = &ixgbe_clean_rxonly_many;
+               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+                       if (!q_vector->rxr_count || !q_vector->txr_count) {
+                               if (q_vector->txr_count == 1)
+                                       napi->poll = &ixgbe_clean_txonly;
+                               else if (q_vector->rxr_count == 1)
+                                       napi->poll = &ixgbe_clean_rxonly;
+                       }
+               }
 
                napi_enable(napi);
        }
@@ -2227,8 +2619,6 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
-               if (!q_vector->rxr_count)
-                       continue;
                napi_disable(&q_vector->napi);
        }
 }
@@ -2286,6 +2676,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct ixgbe_hw *hw = &adapter->hw;
        int i;
 
        ixgbe_set_rx_mode(netdev);
@@ -2293,7 +2684,10 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
        ixgbe_restore_vlan(adapter);
 #ifdef CONFIG_IXGBE_DCB
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               netif_set_gso_max_size(netdev, 32768);
+               if (hw->mac.type == ixgbe_mac_82598EB)
+                       netif_set_gso_max_size(netdev, 32768);
+               else
+                       netif_set_gso_max_size(netdev, 65536);
                ixgbe_configure_dcb(adapter);
        } else {
                netif_set_gso_max_size(netdev, 65536);
@@ -2307,6 +2701,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
                ixgbe_configure_fcoe(adapter);
 
 #endif /* IXGBE_FCOE */
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       adapter->tx_ring[i].atr_sample_rate =
+                                                      adapter->atr_sample_rate;
+               ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
+       } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+               ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
+       }
+
        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2370,7 +2773,7 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
 {
        u32 autoneg;
-       bool link_up = false;
+       bool negotiation, link_up = false;
        u32 ret = IXGBE_ERR_LINK_SETUP;
 
        if (hw->mac.ops.check_link)
@@ -2380,13 +2783,12 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
                goto link_cfg_out;
 
        if (hw->mac.ops.get_link_capabilities)
-               ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
-                                                       &hw->mac.autoneg);
+               ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
        if (ret)
                goto link_cfg_out;
 
-       if (hw->mac.ops.setup_link_speed)
-               ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, link_up);
+       if (hw->mac.ops.setup_link)
+               ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
 link_cfg_out:
        return ret;
 }
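
This hunk feeds the negotiation capability reported by the hardware into
setup_link instead of passing a hard-coded true. A standalone sketch of the
guarded ops chain, with stand-in types (the real ixgbe_mac_ops signatures
differ):

#include <stdint.h>

struct mac_ops {
	int (*check_link)(uint32_t *speed, int *up, int wait);
	int (*get_link_capabilities)(uint32_t *speed, int *autoneg);
	int (*setup_link)(uint32_t speed, int autoneg, int up);
};

static int bring_up_link(const struct mac_ops *ops)
{
	uint32_t speed = 0;
	int up = 0, autoneg = 0;
	int err = -1;  /* stands in for IXGBE_ERR_LINK_SETUP */

	if (ops->check_link)
		err = ops->check_link(&speed, &up, 0);
	if (err)
		return err;
	if (ops->get_link_capabilities)
		err = ops->get_link_capabilities(&speed, &autoneg);
	if (err)
		return err;
	if (ops->setup_link)
		err = ops->setup_link(speed, autoneg, up);
	return err;
}
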
@@ -2424,6 +2826,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        u32 txdctl, rxdctl, mhadd;
        u32 dmatxctl;
        u32 gpie;
+       u32 ctrl_ext;
 
        ixgbe_get_hw_control(adapter);
 
@@ -2436,12 +2839,31 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                        /* MSI only */
                        gpie = 0;
                }
+               if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+                       gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+                       gpie |= IXGBE_GPIE_VTMODE_64;
+               }
                /* XXX: to interrupt immediately for EICS writes, enable this */
                /* gpie |= IXGBE_GPIE_EIMEN; */
                IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
        }
 
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               /*
+                * use EIAM to auto-mask when MSI-X interrupt is asserted
+                * this saves a register write for every interrupt
+                */
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+                       break;
+               default:
+               case ixgbe_mac_82599EB:
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+                       break;
+               }
+       } else {
                /* legacy interrupts, use EIAM to auto-mask when reading EICR,
                 * specifically only auto mask tx and rx interrupts */
                IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
@@ -2463,7 +2885,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 
 #ifdef IXGBE_FCOE
        /* adjust max frame to be able to do baby jumbo for FCoE */
-       if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+       if ((netdev->features & NETIF_F_FCOE_MTU) &&
            (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
                max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
 
@@ -2495,6 +2917,18 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+               if (hw->mac.type == ixgbe_mac_82599EB) {
+                       int wait_loop = 10;
+                       /* poll for Tx Enable ready */
+                       do {
+                               msleep(1);
+                               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+                       } while (--wait_loop &&
+                                !(txdctl & IXGBE_TXDCTL_ENABLE));
+                       if (!wait_loop)
+                               DPRINTK(DRV, ERR, "Could not enable "
+                                       "Tx Queue %d\n", j);
+               }
        }
 
        for (i = 0; i < num_rx_rings; i++) {
@@ -2543,16 +2977,23 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 
        /*
         * For hot-pluggable SFP+ devices, a new SFP+ module may have
-        * arrived before interrupts were enabled.  We need to kick off
-        * the SFP+ module setup first, then try to bring up link.
+        * arrived before interrupts were enabled but after probe.  Such
+        * devices wouldn't have their type identified yet. We need to
+        * kick off the SFP+ module setup first, then try to bring up link.
         * If we're not hot-pluggable SFP+, we just need to configure link
         * and bring it up.
         */
-       err = hw->phy.ops.identify(hw);
-       if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-               DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
-               ixgbe_down(adapter);
-               return err;
+       if (hw->phy.type == ixgbe_phy_unknown) {
+               err = hw->phy.ops.identify(hw);
+               if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+                       /*
+                        * Take the device down and schedule the SFP config
+                        * module task, which will unregister_netdev and log it.
+                        */
+                       ixgbe_down(adapter);
+                       schedule_work(&adapter->sfp_config_module_task);
+                       return err;
+               }
        }
 
        if (ixgbe_is_sfp(hw)) {
@@ -2563,6 +3004,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                        DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
        }
 
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               set_bit(__IXGBE_FDIR_INIT_DONE,
+                       &(adapter->tx_ring[i].reinit_state));
+
        /* enable transmits */
        netif_tx_start_all_queues(netdev);
 
@@ -2571,6 +3016,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
        mod_timer(&adapter->watchdog_timer, jiffies);
+
+       /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+       ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+       ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
        return 0;
 }
 
@@ -2605,12 +3056,22 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        case IXGBE_ERR_MASTER_REQUESTS_PENDING:
                dev_err(&adapter->pdev->dev, "master disable timed out\n");
                break;
+       case IXGBE_ERR_EEPROM_VERSION:
+               /* We are running on a pre-production device, log a warning */
+               dev_warn(&adapter->pdev->dev, "This device is a pre-production "
+                        "adapter/LOM.  Please be aware there may be issues "
+                        "associated with your hardware.  If you are "
+                        "experiencing problems please contact your Intel or "
+                        "hardware representative who provided you with this "
+                        "hardware.\n");
+               break;
        default:
                dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
        }
 
        /* reprogram the RAR[0] in case user changed it. */
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+                           IXGBE_RAH_AV);
 }
 
 /**
@@ -2646,11 +3107,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                                dev_kfree_skb(this);
                        } while (skb);
                }
-               if (!rx_buffer_info->page)
-                       continue;
-               pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
-                              PCI_DMA_FROMDEVICE);
-               rx_buffer_info->page_dma = 0;
+               if (!rx_buffer_info->page)
+                       continue;
+               if (rx_buffer_info->page_dma) {
+                       pci_unmap_page(pdev, rx_buffer_info->page_dma,
+                                      PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+                       rx_buffer_info->page_dma = 0;
+               }
                put_page(rx_buffer_info->page);
                rx_buffer_info->page = NULL;
                rx_buffer_info->page_offset = 0;
@@ -2740,6 +3203,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBE_DOWN, &adapter->state);
 
+       /* let the VFs know we are going down, then disable their Tx/Rx */
+       if (adapter->num_vfs) {
+               for (i = 0 ; i < adapter->num_vfs; i++)
+                       adapter->vfinfo[i].clear_to_send = 0;
+
+               /* ping all the active vfs to let them know we are going down */
+               ixgbe_ping_all_vfs(adapter);
+               /* Disable all VFTE/VFRE TX/RX */
+               ixgbe_disable_tx_rx(adapter);
+       }
+
        /* disable receives */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -2755,9 +3229,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
        ixgbe_napi_disable_all(adapter);
 
+       clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
+       del_timer_sync(&adapter->sfp_timer);
        del_timer_sync(&adapter->watchdog_timer);
        cancel_work_sync(&adapter->watchdog_task);
 
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+               cancel_work_sync(&adapter->fdir_reinit_task);
+
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
@@ -2805,7 +3285,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
        }
 #endif
 
-       tx_clean_complete = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
+       tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
        ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
 
        if (!tx_clean_complete)
@@ -2814,7 +3294,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
-               if (adapter->itr_setting & 1)
+               if (adapter->rx_itr_setting & 1)
                        ixgbe_set_itr(adapter);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
@@ -2892,6 +3372,38 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
        return ret;
 }
 
+/**
+ * ixgbe_set_fdir_queues: Allocate queues for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Flow Director is an advanced Rx filter, attempting to get Rx flows back
+ * to the original CPU that initiated the Tx session.  This runs in addition
+ * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
+ * Rx load across CPUs using RSS.
+ *
+ **/
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+{
+       bool ret = false;
+       struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+
+       f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
+       f_fdir->mask = 0;
+
+       /* Flow Director must have RSS enabled */
+       if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+           ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+               adapter->num_tx_queues = f_fdir->indices;
+               adapter->num_rx_queues = f_fdir->indices;
+               ret = true;
+       } else {
+               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+       }
+       return ret;
+}
+
 #ifdef IXGBE_FCOE
 /**
  * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
@@ -2910,21 +3422,26 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 
        f->indices = min((int)num_online_cpus(), f->indices);
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+               adapter->num_rx_queues = 1;
+               adapter->num_tx_queues = 1;
 #ifdef CONFIG_IXGBE_DCB
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       DPRINTK(PROBE, INFO, "FCOE enabled with DCB \n");
+                       DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n");
                        ixgbe_set_dcb_queues(adapter);
                }
 #endif
                if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-                       DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n");
-                       ixgbe_set_rss_queues(adapter);
+                       DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n");
+                       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+                           (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+                               ixgbe_set_fdir_queues(adapter);
+                       else
+                               ixgbe_set_rss_queues(adapter);
                }
                /* adding FCoE rx rings to the end */
                f->mask = adapter->num_rx_queues;
                adapter->num_rx_queues += f->indices;
-               if (adapter->num_tx_queues == 0)
-                       adapter->num_tx_queues = f->indices;
+               adapter->num_tx_queues += f->indices;
 
                ret = true;
        }
@@ -2933,6 +3450,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 }
 
 #endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't allocate any queues of its own yet, so just
+ * decline the request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+       return false;
+}
+
 /*
 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
  * @adapter: board private structure to initialize
@@ -2946,6 +3476,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
+       /* Start with base case */
+       adapter->num_rx_queues = 1;
+       adapter->num_tx_queues = 1;
+       adapter->num_rx_pools = adapter->num_rx_queues;
+       adapter->num_rx_queues_per_pool = 1;
+
+       if (ixgbe_set_sriov_queues(adapter))
+               return;
+
 #ifdef IXGBE_FCOE
        if (ixgbe_set_fcoe_queues(adapter))
                goto done;
@@ -2956,6 +3495,9 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
                goto done;
 
 #endif
+       if (ixgbe_set_fdir_queues(adapter))
+               goto done;
+
        if (ixgbe_set_rss_queues(adapter))
                goto done;
 
@@ -3126,6 +3668,31 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 }
 #endif
 
+/**
+ * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+{
+       int i;
+       bool ret = false;
+
+       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
+           ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       adapter->rx_ring[i].reg_idx = i;
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       adapter->tx_ring[i].reg_idx = i;
+               ret = true;
+       }
+
+       return ret;
+}
+
 #ifdef IXGBE_FCOE
 /**
  * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
@@ -3136,23 +3703,52 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
  */
 static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 {
-       int i, fcoe_i = 0;
+       int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
        bool ret = false;
        struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
 
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 #ifdef CONFIG_IXGBE_DCB
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
                        ixgbe_cache_ring_dcb(adapter);
-                       fcoe_i = adapter->rx_ring[0].reg_idx + 1;
+                       /* find out queues in TC for FCoE */
+                       fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
+                       fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
+                       /*
+                        * In 82599, the number of Tx queues for each traffic
+                        * class for both 8-TC and 4-TC modes are:
+                        * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
+                        * 8 TCs:  32  32  16  16   8   8   8   8
+                        * 4 TCs:  64  64  32  32
+                        * We have at most 8 queues for FCoE, where 8 is the
+                        * FCoE redirection table size. If the TC for FCoE is
+                        * less than or equal to TC3, we have enough queues
+                        * to add the max of 8 queues for FCoE, so we start the
+                        * FCoE Tx queue index at the next one, i.e., reg_idx + 1.
+                        * If TC for FCoE is above TC3, implying 8 TC mode,
+                        * and we need 8 for FCoE, we have to take all queues
+                        * in that traffic class for FCoE.
+                        */
+                       if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
+                               fcoe_tx_i--;
                }
 #endif /* CONFIG_IXGBE_DCB */
                if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-                       ixgbe_cache_ring_rss(adapter);
-                       fcoe_i = f->mask;
+                       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+                           (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+                               ixgbe_cache_ring_fdir(adapter);
+                       else
+                               ixgbe_cache_ring_rss(adapter);
+
+                       fcoe_rx_i = f->mask;
+                       fcoe_tx_i = f->mask;
+               }
+               for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+                       adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
+                       adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
                }
-               for (i = 0; i < f->indices; i++, fcoe_i++)
-                       adapter->rx_ring[f->mask + i].reg_idx = fcoe_i;
                ret = true;
        }
        return ret;
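
A worked reading of the traffic-class comment above: in 8-TC mode the per-TC
Tx queue counts are 32/32/16/16/8/8/8/8, so a class above TC3 owns exactly 8
queues, which is also the FCoE redirection table size; in that case the base
index steps back by one so the 8 FCoE queues are precisely that TC's own
queues. An illustrative sketch, with the counts taken from the comment rather
than read from hardware:

static const int tx_queues_per_tc_8tc[8] = { 32, 32, 16, 16, 8, 8, 8, 8 };

static int fcoe_first_tx_index(int tc_base_reg_idx, int tc, int fcoe_indices)
{
	int fcoe_tx_i = tc_base_reg_idx + 1;

	/* 8 FCoE queues in a TC that only has 8: use the TC's own base */
	if (fcoe_indices == 8 && tc > 3)
		fcoe_tx_i--;
	return fcoe_tx_i;
}
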
@@ -3160,6 +3756,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 
 #endif /* IXGBE_FCOE */
 /**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+       adapter->rx_ring[0].reg_idx = adapter->num_vfs * 2;
+       adapter->tx_ring[0].reg_idx = adapter->num_vfs * 2;
+       return (adapter->num_vfs != 0);
+}
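
The reg_idx = num_vfs * 2 default assumes the 64-pool VT configuration, in
which each pool owns two queues: VFs take the first num_vfs pools, so the PF's
first ring register lands immediately after them. Sketch, with queues-per-pool
as an explicit assumption rather than something read from the device:

static int pf_first_queue_reg_idx(int num_vfs)
{
	const int queues_per_pool = 2; /* assumed 64-pool VT mode */

	return num_vfs * queues_per_pool;
}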
+
+/**
  * ixgbe_cache_ring_register - Descriptor ring to register mapping
  * @adapter: board private structure to initialize
  *
@@ -3176,6 +3790,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
        adapter->rx_ring[0].reg_idx = 0;
        adapter->tx_ring[0].reg_idx = 0;
 
+       if (ixgbe_cache_ring_sriov(adapter))
+               return;
+
 #ifdef IXGBE_FCOE
        if (ixgbe_cache_ring_fcoe(adapter))
                return;
@@ -3186,6 +3803,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
                return;
 
 #endif
+       if (ixgbe_cache_ring_fdir(adapter))
+               return;
+
        if (ixgbe_cache_ring_rss(adapter))
                return;
 }
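
ixgbe_set_num_queues and ixgbe_cache_ring_register walk the same priority
order, SR-IOV, then FCoE, DCB, Flow Director, and finally RSS, stopping at
the first routine that claims the configuration. A generic sketch of that
first-match dispatch:

typedef int (*ring_mapper_t)(void *adapter);

static void map_rings(void *adapter, ring_mapper_t *mappers, int count)
{
	int i;

	/* most specific mapper first; the first taker wins */
	for (i = 0; i < count; i++)
		if (mappers[i](adapter))
			return;
}
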
@@ -3249,10 +3869,10 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
         * It's easy to be greedy for MSI-X vectors, but it really
         * doesn't do us much good if we have a lot more vectors
         * than CPUs.  So let's be conservative and only ask for
-        * (roughly) twice the number of vectors as there are CPU's.
+        * (roughly) the same number of vectors as there are CPUs.
         */
        v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
-                      (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+                      (int)num_online_cpus()) + NON_Q_VECTORS;
 
        /*
         * At the same time, hardware can only support a maximum of
@@ -3279,6 +3899,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 
        adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
        adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+       adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+       adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+       adapter->atr_sample_rate = 0;
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               ixgbe_disable_sriov(adapter);
+
        ixgbe_set_num_queues(adapter);
 
        err = pci_enable_msi(adapter->pdev);
@@ -3312,7 +3938,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
                napi_vectors = adapter->num_rx_queues;
-               poll = &ixgbe_clean_rxonly;
+               poll = &ixgbe_clean_rxtx_many;
        } else {
                num_q_vectors = 1;
                napi_vectors = 1;
@@ -3324,11 +3950,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
+               if (q_vector->txr_count && !q_vector->rxr_count)
+                       q_vector->eitr = adapter->tx_eitr_param;
+               else
+                       q_vector->eitr = adapter->rx_eitr_param;
                q_vector->v_idx = q_idx;
-               q_vector->eitr = adapter->eitr_param;
-               if (q_idx < napi_vectors)
-                       netif_napi_add(adapter->netdev, &q_vector->napi,
-                                      (*poll), 64);
+               netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
                adapter->q_vector[q_idx] = q_vector;
        }
 
@@ -3356,27 +3983,21 @@ err_out:
 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
 {
        int q_idx, num_q_vectors;
-       int napi_vectors;
 
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-               napi_vectors = adapter->num_rx_queues;
-       } else {
+       else
                num_q_vectors = 1;
-               napi_vectors = 1;
-       }
 
        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
-
                adapter->q_vector[q_idx] = NULL;
-               if (q_idx < napi_vectors)
-                       netif_napi_del(&q_vector->napi);
+               netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
 }
 
-void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
+static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
 {
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
@@ -3490,14 +4111,15 @@ static void ixgbe_sfp_task(struct work_struct *work)
        if ((hw->phy.type == ixgbe_phy_nl) &&
            (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
                s32 ret = hw->phy.ops.identify_sfp(hw);
-               if (ret)
+               if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
                        goto reschedule;
                ret = hw->phy.ops.reset(hw);
                if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-                       DPRINTK(PROBE, ERR, "failed to initialize because an "
-                               "unsupported SFP+ module type was detected.\n"
-                               "Reload the driver after installing a "
-                               "supported module.\n");
+                       dev_err(&adapter->pdev->dev, "failed to initialize "
+                               "because an unsupported SFP+ module type "
+                               "was detected.\n"
+                               "Reload the driver after installing a "
+                               "supported module.\n");
                        unregister_netdev(adapter->netdev);
                } else {
                        DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
@@ -3550,11 +4172,21 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
        } else if (hw->mac.type == ixgbe_mac_82599EB) {
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
-               adapter->flags |= IXGBE_FLAG_RSC_CAPABLE;
-               adapter->flags |= IXGBE_FLAG_RSC_ENABLED;
+               adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+               adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               adapter->ring_feature[RING_F_FDIR].indices =
+                                                        IXGBE_MAX_FDIR_INDICES;
+               adapter->atr_sample_rate = 20;
+               adapter->fdir_pballoc = 0;
 #ifdef IXGBE_FCOE
-               adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
-               adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
+               adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+               adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+               adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_IXGBE_DCB
+               /* Default traffic class to use for FCoE */
+               adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
+#endif
 #endif /* IXGBE_FCOE */
        }
 
@@ -3592,8 +4224,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        hw->fc.disable_fc_autoneg = false;
 
        /* enable itr by default in dynamic mode */
-       adapter->itr_setting = 1;
-       adapter->eitr_param = 20000;
+       adapter->rx_itr_setting = 1;
+       adapter->rx_eitr_param = 20000;
+       adapter->tx_itr_setting = 1;
+       adapter->tx_eitr_param = 10000;
 
        /* set defaults for eitr in MegaBytes */
        adapter->eitr_low = 10;
@@ -3949,6 +4583,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
+       /*
+        * pci_restore_state clears dev->state_saved so call
+        * pci_save_state to restore it.
+        */
+       pci_save_state(pdev);
 
        err = pci_enable_device_mem(pdev);
        if (err) {
@@ -4083,20 +4722,35 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
  **/
 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
+       struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u64 total_mpc = 0;
        u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
+       u64 non_eop_descs = 0, restart_queue = 0;
 
-       if (hw->mac.type == ixgbe_mac_82599EB) {
+       if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                u64 rsc_count = 0;
+               u64 rsc_flush = 0;
                for (i = 0; i < 16; i++)
                        adapter->hw_rx_no_dma_resources +=
                                             IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
-               for (i = 0; i < adapter->num_rx_queues; i++)
+               for (i = 0; i < adapter->num_rx_queues; i++) {
                        rsc_count += adapter->rx_ring[i].rsc_count;
-               adapter->rsc_count = rsc_count;
+                       rsc_flush += adapter->rx_ring[i].rsc_flush;
+               }
+               adapter->rsc_total_count = rsc_count;
+               adapter->rsc_total_flush = rsc_flush;
        }
 
+       /* gather per-queue stats into the adapter struct */
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               restart_queue += adapter->tx_ring[i].restart_queue;
+       adapter->restart_queue = restart_queue;
+
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+       adapter->non_eop_descs = non_eop_descs;
+
        adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        for (i = 0; i < 8; i++) {
                /* for packet buffers not used, the register should read 0 */
@@ -4133,14 +4787,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 
        /* 82598 hardware only has a 32 bit counter in the high register */
        if (hw->mac.type == ixgbe_mac_82599EB) {
+               u64 tmp;
                adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-               IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
+               tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
+               adapter->stats.gorc += (tmp << 32);
                adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-               IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
+               tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
+               adapter->stats.gotc += (tmp << 32);
                adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
                IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+               adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+               adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
 #ifdef IXGBE_FCOE
                adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
                adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
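
Instead of reading the high half only to clear it, the 82599 path now folds
the 4 significant bits of GORCH/GOTCH into the 64-bit software counter. A
one-line sketch of the reconstruction, assuming both halves are read
back-to-back and clear on read:

#include <stdint.h>

static uint64_t counter_36bit(uint32_t lo, uint32_t hi)
{
	/* low register: 32 bits; high register: 4 valid bits */
	return (uint64_t)lo | ((uint64_t)(hi & 0xF) << 32);
}
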
@@ -4197,15 +4856,15 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
 
        /* Fill out the OS statistics structure */
-       adapter->net_stats.multicast = adapter->stats.mprc;
+       netdev->stats.multicast = adapter->stats.mprc;
 
        /* Rx Errors */
-       adapter->net_stats.rx_errors = adapter->stats.crcerrs +
+       netdev->stats.rx_errors = adapter->stats.crcerrs +
                                       adapter->stats.rlec;
-       adapter->net_stats.rx_dropped = 0;
-       adapter->net_stats.rx_length_errors = adapter->stats.rlec;
-       adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-       adapter->net_stats.rx_missed_errors = total_mpc;
+       netdev->stats.rx_dropped = 0;
+       netdev->stats.rx_length_errors = adapter->stats.rlec;
+       netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+       netdev->stats.rx_missed_errors = total_mpc;
 }
 
 /**
@@ -4216,57 +4875,43 @@ static void ixgbe_watchdog(unsigned long data)
 {
        struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
        struct ixgbe_hw *hw = &adapter->hw;
+       u64 eics = 0;
+       int i;
 
-       /* Do the watchdog outside of interrupt context due to the lovely
-        * delays that some of the newer hardware requires */
-       if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-               u64 eics = 0;
-               int i;
+       /*
+        * Do the watchdog outside of interrupt context due to the lovely
+        * delays that some of the newer hardware requires.
+        */
 
-               for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++)
-                       eics |= ((u64)1 << i);
+       if (test_bit(__IXGBE_DOWN, &adapter->state))
+               goto watchdog_short_circuit;
 
-               /* Cause software interrupt to ensure rx rings are cleaned */
-               switch (hw->mac.type) {
-               case ixgbe_mac_82598EB:
-                       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)eics);
-                       } else {
-                               /*
-                                * for legacy and MSI interrupts don't set any
-                                * bits that are enabled for EIAM, because this
-                                * operation would set *both* EIMS and EICS for
-                                * any bit in EIAM
-                                */
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS,
-                                    (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-                       }
-                       break;
-               case ixgbe_mac_82599EB:
-                       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0),
-                                               (u32)(eics & 0xFFFFFFFF));
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
-                                               (u32)(eics >> 32));
-                       } else {
-                               /*
-                                * for legacy and MSI interrupts don't set any
-                                * bits that are enabled for EIAM, because this
-                                * operation would set *both* EIMS and EICS for
-                                * any bit in EIAM
-                                */
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS,
-                                    (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-                       }
-                       break;
-               default:
-                       break;
-               }
-               /* Reset the timer */
-               mod_timer(&adapter->watchdog_timer,
-                         round_jiffies(jiffies + 2 * HZ));
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+               /*
+                * for legacy and MSI interrupts don't set any bits
+                * that are enabled for EIAM, because this operation
+                * would set *both* EIMS and EICS for any bit in EIAM
+                */
+               IXGBE_WRITE_REG(hw, IXGBE_EICS,
+                       (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+               goto watchdog_reschedule;
+       }
+
+       /* get one bit for every active tx/rx interrupt vector */
+       for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+               struct ixgbe_q_vector *qv = adapter->q_vector[i];
+               if (qv->rxr_count || qv->txr_count)
+                       eics |= ((u64)1 << i);
        }
 
+       /* Cause software interrupt to ensure rx rings are cleaned */
+       ixgbe_irq_rearm_queues(adapter, eics);
+
+watchdog_reschedule:
+       /* Reset the timer */
+       mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+
+watchdog_short_circuit:
        schedule_work(&adapter->watchdog_task);
 }
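
The reworked watchdog builds a 64-bit mask with one bit per vector that
actually carries rings, then hands it to ixgbe_irq_rearm_queues. Judging by
the 82599 branch deleted above, that helper splits the mask across the two
32-bit extended EICS registers; a sketch under that assumption:

#include <stdint.h>

static void rearm_queues(uint64_t eics,
			 void (*write_eics_ex)(int idx, uint32_t val))
{
	write_eics_ex(0, (uint32_t)(eics & 0xFFFFFFFF));
	write_eics_ex(1, (uint32_t)(eics >> 32));
}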
 
@@ -4281,13 +4926,14 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
                                                     multispeed_fiber_task);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 autoneg;
+       bool negotiation;
 
        adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
-       if (hw->mac.ops.get_link_capabilities)
-               hw->mac.ops.get_link_capabilities(hw, &autoneg,
-                                                 &hw->mac.autoneg);
-       if (hw->mac.ops.setup_link_speed)
-               hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
+       autoneg = hw->phy.autoneg_advertised;
+       if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+               hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+       if (hw->mac.ops.setup_link)
+               hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
 }
@@ -4305,10 +4951,17 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
        u32 err;
 
        adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
+
+       /* Time for electrical oscillations to settle down */
+       msleep(100);
        err = hw->phy.ops.identify_sfp(hw);
+
        if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-               DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
-               ixgbe_down(adapter);
+               dev_err(&adapter->pdev->dev, "failed to initialize because "
+                       "an unsupported SFP+ module type was detected.\n"
+                       "Reload the driver after installing a supported "
+                       "module.\n");
+               unregister_netdev(adapter->netdev);
                return;
        }
        hw->mac.ops.setup_sfp(hw);
@@ -4320,6 +4973,30 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
 }
 
 /**
+ * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_fdir_reinit_task(struct work_struct *work)
+{
+       struct ixgbe_adapter *adapter = container_of(work,
+                                                    struct ixgbe_adapter,
+                                                    fdir_reinit_task);
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+
+       if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       set_bit(__IXGBE_FDIR_INIT_DONE,
+                               &(adapter->tx_ring[i].reinit_state));
+       } else {
+               DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
+                       "ignored adding FDIR ATR filters \n");
+       }
+       /* Done FDIR Re-initialization, enable transmits */
+       netif_tx_start_all_queues(adapter->netdev);
+}
+
+/**
  * ixgbe_watchdog_task - worker thread to bring link up
  * @work: pointer to work_struct containing our data
  **/
@@ -4370,13 +5047,13 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                        if (hw->mac.type == ixgbe_mac_82599EB) {
                                u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
                                u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-                               flow_rx = (mflcn & IXGBE_MFLCN_RFCE);
-                               flow_tx = (fccfg & IXGBE_FCCFG_TFCE_802_3X);
+                               flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+                               flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
                        } else {
                                u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                                u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
-                               flow_rx = (frctl & IXGBE_FCTRL_RFCE);
-                               flow_tx = (rmcs & IXGBE_RMCS_TFCE_802_3X);
+                               flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
+                               flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
                        }
 
                        printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
@@ -4456,14 +5133,12 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
-                       adapter->hw_tso_ctxt++;
-               } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+               } else if (skb_is_gso_v6(skb)) {
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                             &ipv6_hdr(skb)->daddr,
                                             0, IPPROTO_TCP, 0);
-                       adapter->hw_tso6_ctxt++;
                }
 
                i = tx_ring->next_to_use;
@@ -4546,7 +5221,18 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                                    IXGBE_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       switch (skb->protocol) {
+                       __be16 protocol;
+
+                       if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+                               const struct vlan_ethhdr *vhdr =
+                                       (const struct vlan_ethhdr *)skb->data;
+
+                               protocol = vhdr->h_vlan_encapsulated_proto;
+                       } else {
+                               protocol = skb->protocol;
+                       }
+
+                       switch (protocol) {
                        case cpu_to_be16(ETH_P_IP):
                                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -4582,7 +5268,6 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
 
-               adapter->hw_csum_tx_good++;
                i++;
                if (i == tx_ring->count)
                        i = 0;
@@ -4599,23 +5284,16 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                         struct sk_buff *skb, u32 tx_flags,
                         unsigned int first)
 {
+       struct pci_dev *pdev = adapter->pdev;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int len;
        unsigned int total = skb->len;
        unsigned int offset = 0, size, count = 0, i;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
-       dma_addr_t *map;
 
        i = tx_ring->next_to_use;
 
-       if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-               dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-               return 0;
-       }
-
-       map = skb_shinfo(skb)->dma_maps;
-
        if (tx_flags & IXGBE_TX_FLAGS_FCOE)
                /* excluding fcoe_crc_eof for FCoE */
                total -= sizeof(struct fcoe_crc_eof);
@@ -4626,7 +5304,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                tx_buffer_info->length = size;
-               tx_buffer_info->dma = map[0] + offset;
+               tx_buffer_info->mapped_as_page = false;
+               tx_buffer_info->dma = pci_map_single(pdev,
+                                                    skb->data + offset,
+                                                    size, PCI_DMA_TODEVICE);
+               if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+                       goto dma_error;
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
 
@@ -4647,7 +5330,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
                frag = &skb_shinfo(skb)->frags[f];
                len = min((unsigned int)frag->size, total);
-               offset = 0;
+               offset = frag->page_offset;
 
                while (len) {
                        i++;
@@ -4658,7 +5341,13 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                        tx_buffer_info->length = size;
-                       tx_buffer_info->dma = map[f + 1] + offset;
+                       tx_buffer_info->dma = pci_map_page(adapter->pdev,
+                                                          frag->page,
+                                                          offset, size,
+                                                          PCI_DMA_TODEVICE);
+                       tx_buffer_info->mapped_as_page = true;
+                       if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+                               goto dma_error;
                        tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
 
@@ -4675,6 +5364,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
        tx_ring->tx_buffer_info[first].next_to_watch = i;
 
        return count;
+
+dma_error:
+       dev_err(&pdev->dev, "TX DMA map failed\n");
+
+       /* clear timestamp and dma mappings for failed tx_buffer_info map */
+       tx_buffer_info->dma = 0;
+       tx_buffer_info->time_stamp = 0;
+       tx_buffer_info->next_to_watch = 0;
+       if (count)
+               count--;
+
+       /* clear timestamp and dma mappings for remaining portion of packet */
+       while (count--) {
+               if (i == 0)
+                       i += tx_ring->count;
+               i--;
+               tx_buffer_info = &tx_ring->tx_buffer_info[i];
+               ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+       }
+
+       return 0;
 }
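/* Illustrative trace of the backward unwind (values invented): on a
 * 512-entry ring, suppose the skb's linear part maps at entry 510, the
 * first page fragment at entry 511, and the next fragment fails after i
 * wraps to 0.  The error path clears the failed entry 0, drops count from
 * 2 to 1, and the loop then wraps i from 0 back to 511 and releases that
 * mapping before the function reports failure.
 */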
 
 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -4746,11 +5456,54 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
        writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
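/* The tail write above is the producer doorbell: once the tail index
 * moves, hardware may begin fetching the new descriptors, so the
 * descriptor stores must be ordered before this MMIO write
 * (conventionally via a wmb() just ahead of the tail update; those lines
 * are elided from this hunk).
 */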
 
+static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+                     int queue, u32 tx_flags)
+{
+       /* Right now, we support IPv4 only */
+       struct ixgbe_atr_input atr_input;
+       struct tcphdr *th;
+       struct iphdr *iph = ip_hdr(skb);
+       struct ethhdr *eth = (struct ethhdr *)skb->data;
+       u16 vlan_id, src_port, dst_port, flex_bytes;
+       u32 src_ipv4_addr, dst_ipv4_addr;
+       u8 l4type = 0;
+
+       /* only TCP flows are sampled for ATR at present */
+       if (iph->protocol == IPPROTO_TCP) {
+               th = tcp_hdr(skb);
+               src_port = th->source;
+               dst_port = th->dest;
+               l4type |= IXGBE_ATR_L4TYPE_TCP;
+               /* the IPv4 bit of l4type is 0, so nothing more to set */
+       } else {
+               /* Unsupported L4 header, just bail here */
+               return;
+       }
+
+       memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+
+       vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
+                  IXGBE_TX_FLAGS_VLAN_SHIFT;
+       src_ipv4_addr = iph->saddr;
+       dst_ipv4_addr = iph->daddr;
+       flex_bytes = eth->h_proto;
+
+       ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
+       ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
+       ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
+       ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
+       ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+       /* src and dst are inverted, think how the receiver sees them */
+       ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
+       ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+
+       /* This assumes the Rx queue and Tx queue are bound to the same CPU */
+       ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+}
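/* Concrete reading of the inversion (addresses invented): transmitting a
 * TCP segment from 10.0.0.1:5000 to 10.0.0.2:80 installs a signature
 * filter with source 10.0.0.2:80 and destination 10.0.0.1:5000, i.e. the
 * tuple the reply will carry, so the reply lands on the Rx queue paired
 * with this Tx queue and CPU.  The ports in the set_src/set_dst_port
 * calls above are swapped for the same reason as the addresses.
 */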
+
 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                  struct ixgbe_ring *tx_ring, int size)
 {
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
        netif_stop_subqueue(netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
@@ -4764,7 +5517,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 
        /* A reprieve! - use start_queue because it doesn't call schedule */
        netif_start_subqueue(netdev, tx_ring->queue_index);
-       ++adapter->restart_queue;
+       ++tx_ring->restart_queue;
        return 0;
 }
 
@@ -4779,45 +5532,71 @@ static int ixgbe_maybe_stop_tx(struct net_device *netdev,
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
+       int txq = smp_processor_id();
+
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+               return txq;
 
+#ifdef IXGBE_FCOE
+       if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+           (skb->protocol == htons(ETH_P_FCOE))) {
+               txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+               txq += adapter->ring_feature[RING_F_FCOE].mask;
+               return txq;
+       }
+#endif
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-               return 0;  /* All traffic should default to class 0 */
+               return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
 
        return skb_tx_hash(dev, skb);
 }
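/* In the DCB branch the queue index is the 802.1p priority, which
 * occupies bits 15:13 of the VLAN TCI, hence the shift by 13.  A minimal
 * sketch with an invented TCI, assuming IXGBE_TX_FLAGS_VLAN_PRIO_MASK
 * covers exactly those three bits:
 *
 *	u16 vlan_tci = 0xa123;		(PCP bits 15:13 = 0b101)
 *	u16 tc = (vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
 *
 * which yields tc == 5 for this TCI.
 */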
 
-static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
+                                   struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_ring *tx_ring;
+       struct netdev_queue *txq;
        unsigned int first;
        unsigned int tx_flags = 0;
        u8 hdr_len = 0;
-       int r_idx = 0, tso;
+       int tso;
        int count = 0;
        unsigned int f;
 
-       r_idx = skb->queue_mapping;
-       tx_ring = &adapter->tx_ring[r_idx];
-
        if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb);
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                        tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
-                       tx_flags |= (skb->queue_mapping << 13);
+                       tx_flags |= ((skb->queue_mapping & 0x7) << 13);
                }
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               tx_flags |= (skb->queue_mapping << 13);
-               tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
-               tx_flags |= IXGBE_TX_FLAGS_VLAN;
+               if (skb->priority != TC_PRIO_CONTROL) {
+                       tx_flags |= ((skb->queue_mapping & 0x7) << 13);
+                       tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+                       tx_flags |= IXGBE_TX_FLAGS_VLAN;
+               } else {
+                       skb->queue_mapping =
+                               adapter->ring_feature[RING_F_DCB].indices - 1;
+               }
        }
 
+       tx_ring = &adapter->tx_ring[skb->queue_mapping];
+
        if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-           (skb->protocol == htons(ETH_P_FCOE)))
+           (skb->protocol == htons(ETH_P_FCOE))) {
                tx_flags |= IXGBE_TX_FLAGS_FCOE;
-
+#ifdef IXGBE_FCOE
+#ifdef CONFIG_IXGBE_DCB
+               tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+                             << IXGBE_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= ((adapter->fcoe.up << 13)
+                             << IXGBE_TX_FLAGS_VLAN_SHIFT);
+#endif
+#endif
+       }
        /* four things can cause us to need a context descriptor */
        if (skb_is_gso(skb) ||
            (skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -4864,6 +5643,20 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
        count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
        if (count) {
+               /* add the ATR filter if ATR is on */
+               if (tx_ring->atr_sample_rate) {
+                       ++tx_ring->atr_count;
+                       if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
+                            test_bit(__IXGBE_FDIR_INIT_DONE,
+                                      &tx_ring->reinit_state)) {
+                               ixgbe_atr(adapter, skb, tx_ring->queue_index,
+                                         tx_flags);
+                               tx_ring->atr_count = 0;
+                       }
+               }
+               txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
+               txq->tx_bytes += skb->len;
+               txq->tx_packets++;
                ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
                               hdr_len);
                ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
@@ -4878,21 +5671,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 }
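/* The atr_sample_rate gate above refreshes a flow's FDIR filter at most
 * once per atr_sample_rate packets on a ring, and only while the ring's
 * __IXGBE_FDIR_INIT_DONE bit is set: with a sample rate of 20, for
 * example, roughly every 20th packet transmitted on the queue reaches
 * ixgbe_atr().
 */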
 
 /**
- * ixgbe_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-       /* only return the current stats */
-       return &adapter->net_stats;
-}
-
-/**
  * ixgbe_set_mac - Change the Ethernet Address of the NIC
  * @netdev: network interface device structure
  * @p: pointer to an address structure
@@ -4911,7 +5689,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+                           IXGBE_RAH_AV);
 
        return 0;
 }
@@ -4952,7 +5731,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 
 /**
  * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
- * netdev->dev_addr_list
+ * netdev->dev_addrs
  * @netdev: network interface device structure
  *
  * Returns non-zero on failure
@@ -4973,7 +5752,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
 
 /**
  * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
- * netdev->dev_addr_list
+ * netdev->dev_addrs
  * @netdev: network interface device structure
  *
  * Returns non-zero on failure
@@ -5001,12 +5780,23 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
 static void ixgbe_netpoll(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       int i;
+
+       /* if interface is down do nothing */
+       if (test_bit(__IXGBE_DOWN, &adapter->state))
+               return;
 
-       disable_irq(adapter->pdev->irq);
        adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
-       ixgbe_intr(adapter->pdev->irq, netdev);
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+               for (i = 0; i < num_q_vectors; i++) {
+                       struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+                       ixgbe_msix_clean_many(0, q_vector);
+               }
+       } else {
+               ixgbe_intr(adapter->pdev->irq, netdev);
+       }
        adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
-       enable_irq(adapter->pdev->irq);
 }
 #endif
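/* With MSI-X there is no single device IRQ covering all rings, so the
 * netpoll path above polls each queue vector directly instead of
 * replaying one interrupt; the old disable_irq()/enable_irq() bracketing
 * only made sense in the single-vector case.
 */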
 
@@ -5015,7 +5805,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_stop               = ixgbe_close,
        .ndo_start_xmit         = ixgbe_xmit_frame,
        .ndo_select_queue       = ixgbe_select_queue,
-       .ndo_get_stats          = ixgbe_get_stats,
        .ndo_set_rx_mode        = ixgbe_set_rx_mode,
        .ndo_set_multicast_list = ixgbe_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
@@ -5032,9 +5821,67 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 #ifdef IXGBE_FCOE
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
        .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
+       .ndo_fcoe_enable = ixgbe_fcoe_enable,
+       .ndo_fcoe_disable = ixgbe_fcoe_disable,
+       .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
 #endif /* IXGBE_FCOE */
 };
 
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+                          const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+       struct ixgbe_hw *hw = &adapter->hw;
+       int err;
+
+       if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+               return;
+
+        /* The 82599 supports up to 64 VFs per physical function,
+        * but this implementation limits allocation to 63 so that
+        * basic networking resources are still available to the
+        * physical function.
+        */
+       adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+       adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+       err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+       if (err) {
+               DPRINTK(PROBE, ERR,
+                       "Failed to enable PCI sriov: %d\n", err);
+               goto err_novfs;
+       }
+       /* If call to enable VFs succeeded then allocate memory
+        * for per VF control structures.
+        */
+       adapter->vfinfo =
+               kcalloc(adapter->num_vfs,
+                       sizeof(struct vf_data_storage), GFP_KERNEL);
+       if (adapter->vfinfo) {
+               /* Now that we're sure SR-IOV is enabled
+                * and memory has been allocated, set up the mailbox parameters
+                */
+               ixgbe_init_mbx_params_pf(hw);
+               memcpy(&hw->mbx.ops, ii->mbx_ops,
+                      sizeof(hw->mbx.ops));
+
+               /* Disable RSC when in SR-IOV mode */
+               adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+                                    IXGBE_FLAG2_RSC_ENABLED);
+               return;
+       }
+
+       /* Allocation failed, so fall back to non-IOV operation */
+       DPRINTK(PROBE, ERR,
+               "Unable to allocate memory for VF "
+               "Data Storage - SRIOV disabled\n");
+       pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+       adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+       adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
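/* Given the clamp above, asking for more than 63 VFs still yields 63:
 * loading with, e.g., "modprobe ixgbe max_vfs=100" enables 63 virtual
 * functions and leaves the remaining queue and interrupt resources to
 * the physical function.
 */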
+
 /**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -5088,12 +5935,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_pci_reg;
        }
 
-       err = pci_enable_pcie_error_reporting(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-                                   "0x%x\n", err);
-               /* non-fatal, continue */
-       }
+       pci_enable_pcie_error_reporting(pdev);
 
        pci_set_master(pdev);
        pci_save_state(pdev);
@@ -5204,14 +6046,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                          round_jiffies(jiffies + (2 * HZ)));
                err = 0;
        } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-               dev_err(&adapter->pdev->dev, "failed to load because an "
-                       "unsupported SFP+ module type was detected.\n");
+               dev_err(&adapter->pdev->dev, "failed to initialize because "
+                       "an unsupported SFP+ module type was detected.\n"
+                       "Reload the driver after installing a supported "
+                       "module.\n");
                goto err_sw_init;
        } else if (err) {
                dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
                goto err_sw_init;
        }
 
+       ixgbe_probe_vf(adapter, ii);
+
        netdev->features = NETIF_F_SG |
                           NETIF_F_IP_CSUM |
                           NETIF_F_HW_VLAN_TX |
@@ -5229,8 +6075,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
+       netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;
 
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+                                   IXGBE_FLAG_DCB_ENABLED);
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
 
@@ -5239,23 +6089,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 #endif
 
 #ifdef IXGBE_FCOE
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+       if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
                if (hw->mac.ops.get_device_caps) {
                        hw->mac.ops.get_device_caps(hw, &device_caps);
-                       if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) {
-                               netdev->features |= NETIF_F_FCOE_CRC;
-                               netdev->features |= NETIF_F_FSO;
-                               netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
-                       } else {
-                               adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-                       }
+                       if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
+                               adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
                }
        }
 #endif /* IXGBE_FCOE */
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
-       if (adapter->flags & IXGBE_FLAG_RSC_ENABLED)
+       if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                netdev->features |= NETIF_F_LRO;
 
        /* make sure the EEPROM is good */
@@ -5297,7 +6142,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                adapter->wol = 0;
                break;
        }
-       device_init_wakeup(&adapter->pdev->dev, true);
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
        /* pick up the PCI bus settings for reporting later */
@@ -5334,8 +6178,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
 
        /* reset the hardware with the new settings */
-       hw->mac.ops.start_hw(hw);
+       err = hw->mac.ops.start_hw(hw);
 
+       if (err == IXGBE_ERR_EEPROM_VERSION) {
+               /* We are running on a pre-production device, log a warning */
+               dev_warn(&pdev->dev, "This device is a pre-production "
+                        "adapter/LOM.  Please be aware there may be issues "
+                        "associated with your hardware.  If you are "
+                        "experiencing problems please contact your Intel or "
+                        "hardware representative who provided you with this "
+                        "hardware.\n");
+       }
        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
@@ -5344,12 +6197,23 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
 
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+               INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
+
 #ifdef CONFIG_IXGBE_DCA
        if (dca_add_requester(&pdev->dev) == 0) {
                adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                ixgbe_setup_dca(adapter);
        }
 #endif
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+                       adapter->num_vfs);
+               for (i = 0; i < adapter->num_vfs; i++)
+                       ixgbe_vf_configuration(pdev, (i | 0x10000000));
+       }
+
        /* add san mac addr to netdev */
        ixgbe_add_sanmac_netdev(netdev);
 
@@ -5362,6 +6226,8 @@ err_register:
        ixgbe_clear_interrupt_scheme(adapter);
 err_sw_init:
 err_eeprom:
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               ixgbe_disable_sriov(adapter);
        clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
        del_timer_sync(&adapter->sfp_timer);
        cancel_work_sync(&adapter->sfp_task);
@@ -5392,7 +6258,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int err;
 
        set_bit(__IXGBE_DOWN, &adapter->state);
        /* clear the module not found bit to make sure the worker won't
@@ -5406,6 +6271,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        cancel_work_sync(&adapter->sfp_task);
        cancel_work_sync(&adapter->multispeed_fiber_task);
        cancel_work_sync(&adapter->sfp_config_module_task);
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+               cancel_work_sync(&adapter->fdir_reinit_task);
        flush_scheduled_work();
 
 #ifdef CONFIG_IXGBE_DCA
@@ -5428,6 +6296,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
 
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               ixgbe_disable_sriov(adapter);
+
        ixgbe_clear_interrupt_scheme(adapter);
 
        ixgbe_release_hw_control(adapter);
@@ -5440,10 +6311,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 
        free_netdev(netdev);
 
-       err = pci_disable_pcie_error_reporting(pdev);
-       if (err)
-               dev_err(&pdev->dev,
-                       "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+       pci_disable_pcie_error_reporting(pdev);
 
        pci_disable_device(pdev);
 }
@@ -5495,6 +6363,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
+               pci_save_state(pdev);
 
                pci_wake_from_d3(pdev, false);