Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[safe/jmp/linux-2.6] / drivers / net / ixgbe / ixgbe_main.c
index ffd1f16..f098816 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -45,6 +45,7 @@
 #include "ixgbe.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
@@ -52,7 +53,7 @@ static const char ixgbe_driver_string[] =
 
 #define DRV_VERSION "2.0.44-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
@@ -67,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  *   Class, Class Mask, private data (not used) }
  */
-static struct pci_device_id ixgbe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -96,6 +97,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
         board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
+        board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
@@ -122,6 +125,13 @@ static struct notifier_block dca_notifier = {
 };
 #endif
 
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+                 "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
@@ -129,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 gcr;
+       u32 gpie;
+       u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+       /* disable iov and allow time for transactions to clear */
+       pci_disable_sriov(adapter->pdev);
+#endif
+
+       /* turn off device IOV mode */
+       gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+       gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+       IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+       gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+       gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+       IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+       /* set default pool back to 0 */
+       vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+       vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+       IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+       /* take a breather then clean up driver data */
+       msleep(100);
+       /* kfree(NULL) is a no-op, so no need to check first */
+       kfree(adapter->vfinfo);
+       adapter->vfinfo = NULL;
+
+       adapter->num_vfs = 0;
+       adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 {
        u32 ctrl_ext;
@@ -218,10 +263,20 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                              struct ixgbe_tx_buffer
                                              *tx_buffer_info)
 {
-       tx_buffer_info->dma = 0;
+       if (tx_buffer_info->dma) {
+               if (tx_buffer_info->mapped_as_page)
+                       pci_unmap_page(adapter->pdev,
+                                      tx_buffer_info->dma,
+                                      tx_buffer_info->length,
+                                      PCI_DMA_TODEVICE);
+               else
+                       pci_unmap_single(adapter->pdev,
+                                        tx_buffer_info->dma,
+                                        tx_buffer_info->length,
+                                        PCI_DMA_TODEVICE);
+               tx_buffer_info->dma = 0;
+       }
        if (tx_buffer_info->skb) {
-               skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
-                             DMA_TO_DEVICE);
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
@@ -242,18 +297,20 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring)
 {
-       int tc;
        u32 txoff = IXGBE_TFCS_TXOFF;
 
 #ifdef CONFIG_IXGBE_DCB
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+               int tc;
                int reg_idx = tx_ring->reg_idx;
                int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               switch (adapter->hw.mac.type) {
+               case ixgbe_mac_82598EB:
                        tc = reg_idx >> 2;
                        txoff = IXGBE_TFCS_TXOFF0;
-               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+                       break;
+               case ixgbe_mac_82599EB:
                        tc = 0;
                        txoff = IXGBE_TFCS_TXOFF;
                        if (dcb_i == 8) {
@@ -272,6 +329,9 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
                                                tc += (reg_idx - 96) >> 4;
                                }
                        }
+                       break;
+               default:
+                       tc = 0;
                }
                txoff <<= tc;
        }
@@ -403,7 +463,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
-                       ++adapter->restart_queue;
+                       ++tx_ring->restart_queue;
                }
        }
 
@@ -425,8 +485,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
        tx_ring->total_packets += total_packets;
        tx_ring->stats.packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
-       netdev->stats.tx_bytes += total_bytes;
-       netdev->stats.tx_packets += total_packets;
        return (count < tx_ring->work_limit);
 }
 
@@ -614,7 +672,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 
        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
-       adapter->hw_csum_rx_good++;
 }
 
 static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
@@ -671,14 +728,19 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 
                if (!bi->skb) {
                        struct sk_buff *skb;
-                       skb = netdev_alloc_skb_ip_align(adapter->netdev,
-                                                       rx_ring->rx_buf_len);
+                       /* netdev_alloc_skb reserves 32 bytes up front!! */
+                       uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
+                       skb = netdev_alloc_skb(adapter->netdev, bufsz);
 
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
 
+                       /* advance the data pointer to the next cache line */
+                       skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
+                                         - skb->data));
+
                        bi->skb = skb;
                        bi->dma = pci_map_single(pdev, skb->data,
                                                 rx_ring->rx_buf_len,
@@ -791,8 +853,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-                       if (hdr_info & IXGBE_RXDADV_SPH)
-                               adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
@@ -802,7 +862,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
                cleaned = true;
                skb = rx_buffer_info->skb;
-               prefetch(skb->data - NET_IP_ALIGN);
+               prefetch(skb->data);
                rx_buffer_info->skb = NULL;
 
                if (rx_buffer_info->dma) {
@@ -874,7 +934,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
-                       adapter->non_eop_descs++;
+                       rx_ring->non_eop_descs++;
                        goto next_desc;
                }
 
@@ -1008,7 +1068,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
-       mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+       if (adapter->num_vfs)
+               mask &= ~(IXGBE_EIMS_OTHER |
+                         IXGBE_EIMS_MAILBOX |
+                         IXGBE_EIMS_LSC);
+       else
+               mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
 }
 
@@ -1237,6 +1302,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);
 
+       if (eicr & IXGBE_EICR_MAILBOX)
+               ixgbe_msg_task(adapter);
+
        if (hw->mac.type == ixgbe_mac_82598EB)
                ixgbe_check_fan_failure(adapter, eicr);
 
@@ -1317,8 +1385,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
                                      r_idx + 1);
        }
 
-       /* disable interrupts on this vector only */
-       ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+       /* EIAM disabled interrupts (on this vector) for us */
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
@@ -1350,7 +1417,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
                return IRQ_HANDLED;
 
        /* disable interrupts on this vector only */
-       ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+       /* EIAM disabled interrupts (on this vector) for us */
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
@@ -1385,8 +1452,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
                                      r_idx + 1);
        }
 
-       /* disable interrupts on this vector only */
-       ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+       /* EIAM disabled interrupts (on this vector) for us */
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
@@ -1753,6 +1819,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
                mask |= IXGBE_EIMS_ECC;
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
+               if (adapter->num_vfs)
+                       mask |= IXGBE_EIMS_MAILBOX;
        }
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
            adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1761,6 +1829,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
        ixgbe_irq_enable_queues(adapter, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
+
+       if (adapter->num_vfs > 32) {
+               u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+       }
 }
 
 /**
@@ -1890,6 +1963,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+               if (adapter->num_vfs > 32)
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1974,18 +2049,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 
        if (hw->mac.type == ixgbe_mac_82599EB) {
                u32 rttdcs;
+               u32 mask;
 
                /* disable the arbiter while setting MTQC */
                rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
                rttdcs |= IXGBE_RTTDCS_ARBDIS;
                IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 
-               /* We enable 8 traffic classes, DCB only */
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-                       IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
-                                       IXGBE_MTQC_8TC_8TQ));
-               else
+               /* set transmit pool layout */
+               mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+               switch (adapter->flags & mask) {
+
+               case (IXGBE_FLAG_SRIOV_ENABLED):
+                       IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+                                       (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+                       break;
+
+               case (IXGBE_FLAG_DCB_ENABLED):
+                       /* We enable 8 traffic classes, DCB only */
+                       IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+                                     (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+                       break;
+
+               default:
                        IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+                       break;
+               }
 
                /* re-eable the arbiter */
                rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2044,12 +2133,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_IXGBE_DCB
                                 | IXGBE_FLAG_DCB_ENABLED
 #endif
+                                | IXGBE_FLAG_SRIOV_ENABLED
                                );
 
        switch (mask) {
        case (IXGBE_FLAG_RSS_ENABLED):
                mrqc = IXGBE_MRQC_RSSEN;
                break;
+       case (IXGBE_FLAG_SRIOV_ENABLED):
+               mrqc = IXGBE_MRQC_VMDQEN;
+               break;
 #ifdef CONFIG_IXGBE_DCB
        case (IXGBE_FLAG_DCB_ENABLED):
                mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2066,18 +2159,18 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
  * ixgbe_configure_rscctl - enable RSC for the indicated ring
  * @adapter:    address of board private structure
  * @index:      index of ring to set
- * @rx_buf_len: rx buffer length
  **/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
-                                   int rx_buf_len)
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
 {
        struct ixgbe_ring *rx_ring;
        struct ixgbe_hw *hw = &adapter->hw;
        int j;
        u32 rscctrl;
+       int rx_buf_len;
 
        rx_ring = &adapter->rx_ring[index];
        j = rx_ring->reg_idx;
+       rx_buf_len = rx_ring->rx_buf_len;
        rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
        rscctrl |= IXGBE_RSCCTL_RSCEN;
        /*
@@ -2130,7 +2223,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        int rx_buf_len;
 
        /* Decide whether to use packet split mode or not */
-       adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+       /* Do not use packet split if we're in SR-IOV Mode */
+       if (!adapter->num_vfs)
+               adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2142,7 +2237,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                                      IXGBE_PSRTYPE_IPV4HDR |
                                      IXGBE_PSRTYPE_IPV6HDR |
                                      IXGBE_PSRTYPE_L2HDR;
-                       IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+                       IXGBE_WRITE_REG(hw,
+                                       IXGBE_PSRTYPE(adapter->num_vfs),
+                                       psrtype);
                }
        } else {
                if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2228,6 +2325,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
        }
 
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               u32 vt_reg_bits;
+               u32 reg_offset, vf_shift;
+               u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+               vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+                       | IXGBE_VT_CTL_REPLEN;
+               vt_reg_bits |= (adapter->num_vfs <<
+                               IXGBE_VT_CTL_POOL_SHIFT);
+               IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+               vf_shift = adapter->num_vfs % 32;
+               reg_offset = adapter->num_vfs / 32;
+               IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+               /* Enable only the PF's pool for Tx/Rx */
+               IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+               IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+               IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+               ixgbe_set_vmolr(hw, adapter->num_vfs);
+       }
+
        /* Program MRQC for the distribution of queues */
        mrqc = ixgbe_setup_mrqc(adapter);
 
@@ -2259,6 +2380,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        }
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
+       if (adapter->num_vfs) {
+               u32 reg;
+
+               /* Map PF MAC address in RAR Entry 0 to first pool
+                * following VFs */
+               hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+               /* Set up VF register offsets for selected VT Mode, i.e.
+                * 64 VFs for SR-IOV */
+               reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+               reg |= IXGBE_GCR_EXT_SRIOV;
+               IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+       }
+
        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2285,7 +2420,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                /* Enable 82599 HW-RSC */
                for (i = 0; i < adapter->num_rx_queues; i++)
-                       ixgbe_configure_rscctl(adapter, i, rx_buf_len);
+                       ixgbe_configure_rscctl(adapter, i);
 
                /* Disable RSC for ACK packets */
                IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
@@ -2297,15 +2432,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       int pool_ndx = adapter->num_vfs;
 
        /* add VID to filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+       hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
 }
 
 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       int pool_ndx = adapter->num_vfs;
 
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_disable(adapter);
@@ -2316,7 +2453,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
                ixgbe_irq_enable(adapter);
 
        /* remove VID from filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+       hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
 }
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2336,23 +2473,25 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
         * not in DCB mode.
         */
        ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+
+       /* Disable CFI check */
+       ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+
+       /* enable VLAN tag stripping */
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
-               ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+               ctrl |= IXGBE_VLNCTRL_VME;
        } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               ctrl |= IXGBE_VLNCTRL_VFE;
-               /* enable VLAN tag insert/strip */
-               ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-               ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
                for (i = 0; i < adapter->num_rx_queues; i++) {
+                       u32 ctrl;
                        j = adapter->rx_ring[i].reg_idx;
                        ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
                        ctrl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
                }
        }
+
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+
        ixgbe_vlan_rx_add_vid(netdev, 0);
 
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2397,7 +2536,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
  * responsible for configuring the hardware for proper unicast, multicast and
  * promiscuous mode.
  **/
-static void ixgbe_set_rx_mode(struct net_device *netdev)
+void ixgbe_set_rx_mode(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -2429,7 +2568,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 
        /* reprogram secondary unicast list */
-       hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
+       hw->mac.ops.update_uc_addr_list(hw, netdev);
 
        /* reprogram multicast list */
        addr_count = netdev->mc_count;
@@ -2437,6 +2576,8 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
                addr_list = netdev->mc_list->dmi_addr;
        hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
                                        ixgbe_addr_list_itr);
+       if (adapter->num_vfs)
+               ixgbe_restore_vf_multicasts(adapter);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2685,6 +2826,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        u32 txdctl, rxdctl, mhadd;
        u32 dmatxctl;
        u32 gpie;
+       u32 ctrl_ext;
 
        ixgbe_get_hw_control(adapter);
 
@@ -2697,12 +2839,31 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                        /* MSI only */
                        gpie = 0;
                }
+               if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+                       gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+                       gpie |= IXGBE_GPIE_VTMODE_64;
+               }
                /* XXX: to interrupt immediately for EICS writes, enable this */
                /* gpie |= IXGBE_GPIE_EIMEN; */
                IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
        }
 
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               /*
+                * use EIAM to auto-mask when MSI-X interrupt is asserted
+                * this saves a register write for every interrupt
+                */
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+                       break;
+               default:
+               case ixgbe_mac_82599EB:
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+                       break;
+               }
+       } else {
                /* legacy interrupts, use EIAM to auto-mask when reading EICR,
                 * specifically only auto mask tx and rx interrupts */
                IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
@@ -2756,6 +2917,18 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+               if (hw->mac.type == ixgbe_mac_82599EB) {
+                       int wait_loop = 10;
+                       /* poll for Tx Enable ready */
+                       do {
+                               msleep(1);
+                               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+                       } while (--wait_loop &&
+                                !(txdctl & IXGBE_TXDCTL_ENABLE));
+                       if (!wait_loop)
+                               DPRINTK(DRV, ERR, "Could not enable "
+                                       "Tx Queue %d\n", j);
+               }
        }
 
        for (i = 0; i < num_rx_rings; i++) {
@@ -2843,6 +3016,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
        mod_timer(&adapter->watchdog_timer, jiffies);
+
+       /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+       ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+       ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
        return 0;
 }
 
@@ -2891,7 +3070,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        }
 
        /* reprogram the RAR[0] in case user changed it. */
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+                           IXGBE_RAH_AV);
 }
 
 /**
@@ -3023,6 +3203,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBE_DOWN, &adapter->state);
 
+       /* disable receive for all VFs and wait one second */
+       if (adapter->num_vfs) {
+               for (i = 0; i < adapter->num_vfs; i++)
+                       adapter->vfinfo[i].clear_to_send = 0;
+
+               /* ping all the active vfs to let them know we are going down */
+               ixgbe_ping_all_vfs(adapter);
+               /* Disable all VFTE/VFRE TX/RX */
+               ixgbe_disable_tx_rx(adapter);
+       }
+
        /* disable receives */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -3259,6 +3450,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 }
 
 #endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+       return false;
+}
+
 /*
  * ixgbe_set_num_queues: Allocate queues for device, feature dependant
  * @adapter: board private structure to initialize
@@ -3272,6 +3476,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
+       /* Start with base case */
+       adapter->num_rx_queues = 1;
+       adapter->num_tx_queues = 1;
+       adapter->num_rx_pools = adapter->num_rx_queues;
+       adapter->num_rx_queues_per_pool = 1;
+
+       if (ixgbe_set_sriov_queues(adapter))
+               return;
+
 #ifdef IXGBE_FCOE
        if (ixgbe_set_fcoe_queues(adapter))
                goto done;
@@ -3543,6 +3756,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 
 #endif /* IXGBE_FCOE */
 /**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+       adapter->rx_ring[0].reg_idx = adapter->num_vfs * 2;
+       adapter->tx_ring[0].reg_idx = adapter->num_vfs * 2;
+       if (adapter->num_vfs)
+               return true;
+       else
+               return false;
+}
+
+/**
  * ixgbe_cache_ring_register - Descriptor ring to register mapping
  * @adapter: board private structure to initialize
  *
@@ -3559,6 +3790,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
        adapter->rx_ring[0].reg_idx = 0;
        adapter->tx_ring[0].reg_idx = 0;
 
+       if (ixgbe_cache_ring_sriov(adapter))
+               return;
+
 #ifdef IXGBE_FCOE
        if (ixgbe_cache_ring_fcoe(adapter))
                return;
@@ -3668,6 +3902,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
        adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
        adapter->atr_sample_rate = 0;
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               ixgbe_disable_sriov(adapter);
+
        ixgbe_set_num_queues(adapter);
 
        err = pci_enable_msi(adapter->pdev);
@@ -3946,8 +4183,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
                adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
                adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_IXGBE_DCB
                /* Default traffic class to use for FCoE */
                adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
+#endif
 #endif /* IXGBE_FCOE */
        }
 
@@ -4344,6 +4583,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
+       /*
+        * pci_restore_state clears dev->state_saved so call
+        * pci_save_state to restore it.
+        */
+       pci_save_state(pdev);
 
        err = pci_enable_device_mem(pdev);
        if (err) {
@@ -4482,6 +4726,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        u64 total_mpc = 0;
        u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
+       u64 non_eop_descs = 0, restart_queue = 0;
 
        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                u64 rsc_count = 0;
@@ -4497,6 +4742,15 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                adapter->rsc_total_flush = rsc_flush;
        }
 
+       /* gather some stats to the adapter struct that are per queue */
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               restart_queue += adapter->tx_ring[i].restart_queue;
+       adapter->restart_queue = restart_queue;
+
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+       adapter->non_eop_descs = non_eop_descs;
+
        adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        for (i = 0; i < 8; i++) {
                /* for packet buffers not used, the register should read 0 */
@@ -4879,14 +5133,12 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
-                       adapter->hw_tso_ctxt++;
-               } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+               } else if (skb_is_gso_v6(skb)) {
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                             &ipv6_hdr(skb)->daddr,
                                             0, IPPROTO_TCP, 0);
-                       adapter->hw_tso6_ctxt++;
                }
 
                i = tx_ring->next_to_use;
@@ -4969,7 +5221,18 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                                    IXGBE_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       switch (skb->protocol) {
+                       __be16 protocol;
+
+                       if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+                               const struct vlan_ethhdr *vhdr =
+                                       (const struct vlan_ethhdr *)skb->data;
+
+                               protocol = vhdr->h_vlan_encapsulated_proto;
+                       } else {
+                               protocol = skb->protocol;
+                       }
+
+                       switch (protocol) {
                        case cpu_to_be16(ETH_P_IP):
                                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -5005,7 +5268,6 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
 
-               adapter->hw_csum_tx_good++;
                i++;
                if (i == tx_ring->count)
                        i = 0;
@@ -5022,23 +5284,16 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                         struct sk_buff *skb, u32 tx_flags,
                         unsigned int first)
 {
+       struct pci_dev *pdev = adapter->pdev;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int len;
        unsigned int total = skb->len;
        unsigned int offset = 0, size, count = 0, i;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
-       dma_addr_t *map;
 
        i = tx_ring->next_to_use;
 
-       if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-               dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
-               return 0;
-       }
-
-       map = skb_shinfo(skb)->dma_maps;
-
        if (tx_flags & IXGBE_TX_FLAGS_FCOE)
                /* excluding fcoe_crc_eof for FCoE */
                total -= sizeof(struct fcoe_crc_eof);
@@ -5049,7 +5304,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                tx_buffer_info->length = size;
-               tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+               tx_buffer_info->mapped_as_page = false;
+               tx_buffer_info->dma = pci_map_single(pdev,
+                                                    skb->data + offset,
+                                                    size, PCI_DMA_TODEVICE);
+               if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+                       goto dma_error;
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
 
@@ -5070,7 +5330,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
                frag = &skb_shinfo(skb)->frags[f];
                len = min((unsigned int)frag->size, total);
-               offset = 0;
+               offset = frag->page_offset;
 
                while (len) {
                        i++;
@@ -5081,7 +5341,13 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                        tx_buffer_info->length = size;
-                       tx_buffer_info->dma = map[f] + offset;
+                       tx_buffer_info->dma = pci_map_page(adapter->pdev,
+                                                          frag->page,
+                                                          offset, size,
+                                                          PCI_DMA_TODEVICE);
+                       tx_buffer_info->mapped_as_page = true;
+                       if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+                               goto dma_error;
                        tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
 
@@ -5098,6 +5364,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
        tx_ring->tx_buffer_info[first].next_to_watch = i;
 
        return count;
+
+dma_error:
+       dev_err(&pdev->dev, "TX DMA map failed\n");
+
+       /* clear timestamp and dma mappings for failed tx_buffer_info map */
+       tx_buffer_info->dma = 0;
+       tx_buffer_info->time_stamp = 0;
+       tx_buffer_info->next_to_watch = 0;
+       if (count)
+               count--;
+
+       /* clear timestamp and dma mappings for remaining portion of packet */
+       while (count--) {
+               if (i==0)
+                       i += tx_ring->count;
+               i--;
+               tx_buffer_info = &tx_ring->tx_buffer_info[i];
+               ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+       }
+
+       return 0;
 }
 
 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5217,8 +5504,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                  struct ixgbe_ring *tx_ring, int size)
 {
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
        netif_stop_subqueue(netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
@@ -5232,7 +5517,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 
        /* A reprieve! - use start_queue because it doesn't call schedule */
        netif_start_subqueue(netdev, tx_ring->queue_index);
-       ++adapter->restart_queue;
+       ++tx_ring->restart_queue;
        return 0;
 }
 
@@ -5247,10 +5532,22 @@ static int ixgbe_maybe_stop_tx(struct net_device *netdev,
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
+       int txq = smp_processor_id();
 
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-               return smp_processor_id();
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+               while (unlikely(txq >= dev->real_num_tx_queues))
+                       txq -= dev->real_num_tx_queues;
+               return txq;
+       }
 
+#ifdef IXGBE_FCOE
+       if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+           (skb->protocol == htons(ETH_P_FCOE))) {
+               txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+               txq += adapter->ring_feature[RING_F_FCOE].mask;
+               return txq;
+       }
+#endif
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
 
@@ -5262,10 +5559,11 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_ring *tx_ring;
+       struct netdev_queue *txq;
        unsigned int first;
        unsigned int tx_flags = 0;
        u8 hdr_len = 0;
-       int r_idx = 0, tso;
+       int tso;
        int count = 0;
        unsigned int f;
 
@@ -5273,13 +5571,13 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
                tx_flags |= vlan_tx_tag_get(skb);
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                        tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
-                       tx_flags |= (skb->queue_mapping << 13);
+                       tx_flags |= ((skb->queue_mapping & 0x7) << 13);
                }
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                if (skb->priority != TC_PRIO_CONTROL) {
-                       tx_flags |= (skb->queue_mapping << 13);
+                       tx_flags |= ((skb->queue_mapping & 0x7) << 13);
                        tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                        tx_flags |= IXGBE_TX_FLAGS_VLAN;
                } else {
@@ -5288,17 +5586,18 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
                }
        }
 
-       r_idx = skb->queue_mapping;
-       tx_ring = &adapter->tx_ring[r_idx];
+       tx_ring = &adapter->tx_ring[skb->queue_mapping];
 
        if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
            (skb->protocol == htons(ETH_P_FCOE))) {
                tx_flags |= IXGBE_TX_FLAGS_FCOE;
 #ifdef IXGBE_FCOE
-               r_idx = smp_processor_id();
-               r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-               r_idx += adapter->ring_feature[RING_F_FCOE].mask;
-               tx_ring = &adapter->tx_ring[r_idx];
+#ifdef CONFIG_IXGBE_DCB
+               tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+                             << IXGBE_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= ((adapter->fcoe.up << 13)
+                             << IXGBE_TX_FLAGS_VLAN_SHIFT);
+#endif
 #endif
        }
        /* four things can cause us to need a context descriptor */
@@ -5358,6 +5657,9 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
                                tx_ring->atr_count = 0;
                        }
                }
+               txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
+               txq->tx_bytes += skb->len;
+               txq->tx_packets++;
                ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
                               hdr_len);
                ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
@@ -5372,19 +5674,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 }
 
 /**
- * ixgbe_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
-{
-       /* only return the current stats */
-       return &netdev->stats;
-}
-
-/**
  * ixgbe_set_mac - Change the Ethernet Address of the NIC
  * @netdev: network interface device structure
  * @p: pointer to an address structure
@@ -5403,7 +5692,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+                           IXGBE_RAH_AV);
 
        return 0;
 }
@@ -5495,6 +5785,10 @@ static void ixgbe_netpoll(struct net_device *netdev)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int i;
 
+       /* if interface is down do nothing */
+       if (test_bit(__IXGBE_DOWN, &adapter->state))
+               return;
+
        adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -5514,7 +5808,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_stop               = ixgbe_close,
        .ndo_start_xmit         = ixgbe_xmit_frame,
        .ndo_select_queue       = ixgbe_select_queue,
-       .ndo_get_stats          = ixgbe_get_stats,
        .ndo_set_rx_mode        = ixgbe_set_rx_mode,
        .ndo_set_multicast_list = ixgbe_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
@@ -5537,6 +5830,61 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 #endif /* IXGBE_FCOE */
 };
 
+/**
+ * ixgbe_probe_vf - enable SR-IOV and allocate per-VF state at probe time
+ * @adapter: board private structure
+ * @ii: board info; supplies the PF mailbox operations to install
+ *
+ * Compiles to a no-op unless CONFIG_PCI_IOV is set; at runtime does
+ * nothing unless the MAC is 82599 and the max_vfs module parameter is
+ * nonzero.  On any failure, SR-IOV is torn back down: the flag is
+ * cleared and num_vfs reset to 0 so the rest of probe runs as non-IOV.
+ */
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+                          const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+       struct ixgbe_hw *hw = &adapter->hw;
+       int err;
+
+       /* SR-IOV here is 82599-only and opt-in via max_vfs */
+       if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+               return;
+
+       /* The 82599 supports up to 64 VFs per physical function
+        * but this implementation limits allocation to 63 so that
+        * basic networking resources are still available to the
+        * physical function
+        */
+       adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+       adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+       err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+       if (err) {
+               DPRINTK(PROBE, ERR,
+                       "Failed to enable PCI sriov: %d\n", err);
+               goto err_novfs;
+       }
+       /* If call to enable VFs succeeded then allocate memory
+        * for per VF control structures.
+        */
+       adapter->vfinfo =
+               kcalloc(adapter->num_vfs,
+                       sizeof(struct vf_data_storage), GFP_KERNEL);
+       if (adapter->vfinfo) {
+               /* Now that we're sure SR-IOV is enabled
+                * and memory allocated set up the mailbox parameters
+                */
+               ixgbe_init_mbx_params_pf(hw);
+               memcpy(&hw->mbx.ops, ii->mbx_ops,
+                      sizeof(hw->mbx.ops));
+
+               /* Disable RSC when in SR-IOV mode */
+               adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+                                    IXGBE_FLAG2_RSC_ENABLED);
+               return;
+       }
+
+       /* vfinfo allocation failed: undo pci_enable_sriov and fall
+        * through to disable SR-IOV entirely
+        */
+       DPRINTK(PROBE, ERR,
+               "Unable to allocate memory for VF "
+               "Data Storage - SRIOV disabled\n");
+       pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+       adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+       adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
+
 /**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -5711,6 +6059,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_sw_init;
        }
 
+       ixgbe_probe_vf(adapter, ii);
+
        netdev->features = NETIF_F_SG |
                           NETIF_F_IP_CSUM |
                           NETIF_F_HW_VLAN_TX |
@@ -5731,6 +6081,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;
 
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+                                   IXGBE_FLAG_DCB_ENABLED);
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
 
@@ -5857,6 +6210,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                ixgbe_setup_dca(adapter);
        }
 #endif
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+                       adapter->num_vfs);
+               for (i = 0; i < adapter->num_vfs; i++)
+                       ixgbe_vf_configuration(pdev, (i | 0x10000000));
+       }
+
        /* add san mac addr to netdev */
        ixgbe_add_sanmac_netdev(netdev);
 
@@ -5869,6 +6229,8 @@ err_register:
        ixgbe_clear_interrupt_scheme(adapter);
 err_sw_init:
 err_eeprom:
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               ixgbe_disable_sriov(adapter);
        clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
        del_timer_sync(&adapter->sfp_timer);
        cancel_work_sync(&adapter->sfp_task);
@@ -5937,6 +6299,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
 
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               ixgbe_disable_sriov(adapter);
+
        ixgbe_clear_interrupt_scheme(adapter);
 
        ixgbe_release_hw_control(adapter);