igb: only support SRRCTL_DROP_EN when using multiple queues
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index eca5684..b989b34 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007 Intel Corporation.
+  Copyright(c) 2007-2009 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,7 @@
 #include <linux/ipv6.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
+#include <linux/net_tstamp.h>
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
+#include <linux/aer.h>
 #ifdef CONFIG_IGB_DCA
 #include <linux/dca.h>
 #endif
 #include "igb.h"
 
-#define DRV_VERSION "1.2.45-k2"
+#define DRV_VERSION "2.1.0-k2"
 char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
 
 static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
 };
 
-static struct pci_device_id igb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
@@ -76,6 +87,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
 void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
@@ -84,39 +96,30 @@ static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
 static void igb_configure_tx(struct igb_adapter *);
 static void igb_configure_rx(struct igb_adapter *);
-static void igb_setup_rctl(struct igb_adapter *);
 static void igb_clean_all_tx_rings(struct igb_adapter *);
 static void igb_clean_all_rx_rings(struct igb_adapter *);
 static void igb_clean_tx_ring(struct igb_ring *);
 static void igb_clean_rx_ring(struct igb_ring *);
-static void igb_set_multi(struct net_device *);
+static void igb_set_rx_mode(struct net_device *);
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
-static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
-                                 struct igb_ring *);
-static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
+static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
 static struct net_device_stats *igb_get_stats(struct net_device *);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
+static void igb_set_uta(struct igb_adapter *adapter);
 static irqreturn_t igb_intr(int irq, void *);
 static irqreturn_t igb_intr_msi(int irq, void *);
 static irqreturn_t igb_msix_other(int irq, void *);
-static irqreturn_t igb_msix_rx(int irq, void *);
-static irqreturn_t igb_msix_tx(int irq, void *);
-static int igb_clean_rx_ring_msix(struct napi_struct *, int);
+static irqreturn_t igb_msix_ring(int irq, void *);
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *);
-static void igb_update_tx_dca(struct igb_ring *);
+static void igb_update_dca(struct igb_q_vector *);
 static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
-static bool igb_clean_tx_irq(struct igb_ring *);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
-static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
-#ifdef CONFIG_IGB_LRO
-static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
-#endif
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -124,9 +127,21 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
 static void igb_vlan_rx_add_vid(struct net_device *, u16);
 static void igb_vlan_rx_kill_vid(struct net_device *, u16);
 static void igb_restore_vlan(struct igb_adapter *);
+static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
+static void igb_ping_all_vfs(struct igb_adapter *);
+static void igb_msg_task(struct igb_adapter *);
+static void igb_vmm_control(struct igb_adapter *);
+static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
+static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+                              int vf, u16 vlan, u8 qos);
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
+                                struct ifla_vf_info *ivi);
 
-static int igb_suspend(struct pci_dev *, pm_message_t);
 #ifdef CONFIG_PM
+static int igb_suspend(struct pci_dev *, pm_message_t);
 static int igb_resume(struct pci_dev *);
 #endif
 static void igb_shutdown(struct pci_dev *);
@@ -138,11 +153,16 @@ static struct notifier_block dca_notifier = {
        .priority       = 0
 };
 #endif
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /* for netdump / net console */
 static void igb_netpoll(struct net_device *);
 #endif
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs = 0;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+                 "per physical function");
+#endif /* CONFIG_PCI_IOV */
 
 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                     pci_channel_state_t);
@@ -170,13 +190,37 @@ static struct pci_driver igb_driver = {
        .err_handler = &igb_err_handler
 };
 
-static int global_quad_port_a; /* global quad port a indication */
-
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+/**
+ * igb_read_clock - read raw cycle counter (to be used by time counter)
+ */
+static cycle_t igb_read_clock(const struct cyclecounter *tc)
+{
+       struct igb_adapter *adapter =
+               container_of(tc, struct igb_adapter, cycles);
+       struct e1000_hw *hw = &adapter->hw;
+       u64 stamp = 0;
+       int shift = 0;
+
+       /*
+        * The timestamp latches on the lowest register read. For the 82580
+        * the lowest register is SYSTIMR instead of SYSTIML.  However, we never
+        * adjusted TIMINCA, so SYSTIMR will just read as all 0s; ignore it.
+        */
+       if (hw->mac.type == e1000_82580) {
+               stamp = rd32(E1000_SYSTIMR) >> 8;
+               shift = IGB_82580_TSYNC_SHIFT;
+       }
+
+       stamp |= (u64)rd32(E1000_SYSTIML) << shift;
+       stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
+       return stamp;
+}
+
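
For context, a minimal sketch of how a cyclecounter like this is typically
wired into a timecounter (the cycles/clock field names match this patch;
the mult/shift values are illustrative assumptions, not taken from the diff):

    adapter->cycles.read = igb_read_clock;
    adapter->cycles.mask = CLOCKSOURCE_MASK(64);
    adapter->cycles.mult = 1;    /* assumed scaling */
    adapter->cycles.shift = 19;  /* assumed shift */
    /* seed the timecounter with the current wall-clock time */
    timecounter_init(&adapter->clock, &adapter->cycles,
                     ktime_to_ns(ktime_get_real()));
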
 #ifdef DEBUG
 /**
  * igb_get_hw_dev_name - return device name string
@@ -187,6 +231,30 @@ char *igb_get_hw_dev_name(struct e1000_hw *hw)
        struct igb_adapter *adapter = hw->back;
        return adapter->netdev->name;
 }
+
+/**
+ * igb_get_time_str - format current NIC and system time as string
+ */
+static char *igb_get_time_str(struct igb_adapter *adapter,
+                             char buffer[160])
+{
+       cycle_t hw = adapter->cycles.read(&adapter->cycles);
+       struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
+       struct timespec sys;
+       struct timespec delta;
+       getnstimeofday(&sys);
+
+       delta = timespec_sub(nic, sys);
+
+       sprintf(buffer,
+               "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
+               hw,
+               (long)nic.tv_sec, nic.tv_nsec,
+               (long)sys.tv_sec, sys.tv_nsec,
+               (long)delta.tv_sec, delta.tv_nsec);
+
+       return buffer;
+}
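
A usage sketch for the DEBUG-only helper above (hypothetical call site, not
part of this patch):

    char buf[160];
    dev_info(&adapter->pdev->dev, "timesync: %s\n",
             igb_get_time_str(adapter, buf));
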
 #endif
 
 /**
@@ -203,12 +271,10 @@ static int __init igb_init_module(void)
 
        printk(KERN_INFO "%s\n", igb_copyright);
 
-       global_quad_port_a = 0;
-
-       ret = pci_register_driver(&igb_driver);
 #ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
 #endif
+       ret = pci_register_driver(&igb_driver);
        return ret;
 }
 
@@ -230,6 +296,61 @@ static void __exit igb_exit_module(void)
 
 module_exit(igb_exit_module);
 
+#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
+/**
+ * igb_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ **/
+static void igb_cache_ring_register(struct igb_adapter *adapter)
+{
+       int i = 0, j = 0;
+       u32 rbase_offset = adapter->vfs_allocated_count;
+
+       switch (adapter->hw.mac.type) {
+       case e1000_82576:
+               /* The queues are allocated for virtualization such that VF 0
+                * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
+                * In order to avoid collision we start at the first free queue
+                * and continue consuming queues in the same sequence
+                */
+               if (adapter->vfs_allocated_count) {
+                       for (; i < adapter->rss_queues; i++)
+                               adapter->rx_ring[i]->reg_idx = rbase_offset +
+                                                              Q_IDX_82576(i);
+                       for (; j < adapter->rss_queues; j++)
+                               adapter->tx_ring[j]->reg_idx = rbase_offset +
+                                                              Q_IDX_82576(j);
+               }
+       case e1000_82575:
+       case e1000_82580:
+       default:
+               for (; i < adapter->num_rx_queues; i++)
+                       adapter->rx_ring[i]->reg_idx = rbase_offset + i;
+               for (; j < adapter->num_tx_queues; j++)
+                       adapter->tx_ring[j]->reg_idx = rbase_offset + j;
+               break;
+       }
+}
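
To make the Q_IDX_82576 interleaving concrete, a short worked example
(illustrative values only):

    /*
     * Q_IDX_82576(i) = ((i & 0x1) << 3) + (i >> 1)
     *   i = 0 -> 0,  i = 1 -> 8,  i = 2 -> 1,  i = 3 -> 9
     *
     * With 2 VFs (rbase_offset = 2), the PF's rx queue 0 therefore uses
     * hardware ring index 2, queue 1 uses index 10, queue 2 uses index 3,
     * leaving the 0/8 and 1/9 pairs to the two VFs.
     */
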
+
+static void igb_free_queues(struct igb_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               kfree(adapter->tx_ring[i]);
+               adapter->tx_ring[i] = NULL;
+       }
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               kfree(adapter->rx_ring[i]);
+               adapter->rx_ring[i] = NULL;
+       }
+       adapter->num_rx_queues = 0;
+       adapter->num_tx_queues = 0;
+}
+
 /**
  * igb_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
@@ -239,59 +360,63 @@ module_exit(igb_exit_module);
  **/
 static int igb_alloc_queues(struct igb_adapter *adapter)
 {
+       struct igb_ring *ring;
        int i;
 
-       adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-                                  sizeof(struct igb_ring), GFP_KERNEL);
-       if (!adapter->tx_ring)
-               return -ENOMEM;
-
-       adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-                                  sizeof(struct igb_ring), GFP_KERNEL);
-       if (!adapter->rx_ring) {
-               kfree(adapter->tx_ring);
-               return -ENOMEM;
-       }
-
-       adapter->rx_ring->buddy = adapter->tx_ring;
-
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct igb_ring *ring = &(adapter->tx_ring[i]);
+               ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+               if (!ring)
+                       goto err;
                ring->count = adapter->tx_ring_count;
-               ring->adapter = adapter;
                ring->queue_index = i;
+               ring->pdev = adapter->pdev;
+               ring->netdev = adapter->netdev;
+               /* For 82575, context index must be unique per ring. */
+               if (adapter->hw.mac.type == e1000_82575)
+                       ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
+               adapter->tx_ring[i] = ring;
        }
+
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *ring = &(adapter->rx_ring[i]);
+               ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+               if (!ring)
+                       goto err;
                ring->count = adapter->rx_ring_count;
-               ring->adapter = adapter;
                ring->queue_index = i;
-               ring->itr_register = E1000_ITR;
-
-               /* set a default napi handler for each rx_ring */
-               netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
+               ring->pdev = adapter->pdev;
+               ring->netdev = adapter->netdev;
+               ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+               ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+               /* set flag indicating ring supports SCTP checksum offload */
+               if (adapter->hw.mac.type >= e1000_82576)
+                       ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
+               adapter->rx_ring[i] = ring;
        }
-       return 0;
-}
 
-static void igb_free_queues(struct igb_adapter *adapter)
-{
-       int i;
+       igb_cache_ring_register(adapter);
 
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               netif_napi_del(&adapter->rx_ring[i].napi);
+       return 0;
 
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
+err:
+       igb_free_queues(adapter);
+
+       return -ENOMEM;
 }
 
 #define IGB_N0_QUEUE -1
-static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
-                             int tx_queue, int msix_vector)
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 {
        u32 msixbm = 0;
+       struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
+       int rx_queue = IGB_N0_QUEUE;
+       int tx_queue = IGB_N0_QUEUE;
+
+       if (q_vector->rx_ring)
+               rx_queue = q_vector->rx_ring->reg_idx;
+       if (q_vector->tx_ring)
+               tx_queue = q_vector->tx_ring->reg_idx;
 
        switch (hw->mac.type) {
        case e1000_82575:
@@ -299,19 +424,17 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
                   bitmask for the EICR/EIMS/EIMC registers.  To assign one
                   or more queues to a vector, we write the appropriate bits
                   into the MSIXBM register for that vector. */
-               if (rx_queue > IGB_N0_QUEUE) {
+               if (rx_queue > IGB_N0_QUEUE)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
-                       adapter->rx_ring[rx_queue].eims_value = msixbm;
-               }
-               if (tx_queue > IGB_N0_QUEUE) {
+               if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
-                       adapter->tx_ring[tx_queue].eims_value =
-                                 E1000_EICR_TX_QUEUE0 << tx_queue;
-               }
+               if (!adapter->msix_entries && msix_vector == 0)
+                       msixbm |= E1000_EIMS_OTHER;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+               q_vector->eims_value = msixbm;
                break;
        case e1000_82576:
-               /* The 82576 uses a table-based method for assigning vectors.
+               /* 82576 uses a table-based method for assigning vectors.
                   Each queue has a single entry in the table to which we write
                   a vector number along with a "valid" bit.  Sadly, the layout
                   of the table is somewhat counterintuitive. */
@@ -327,7 +450,6 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        }
-                       adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
@@ -342,14 +464,53 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        }
-                       adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
                        array_wr32(E1000_IVAR0, index, ivar);
                }
+               q_vector->eims_value = 1 << msix_vector;
+               break;
+       case e1000_82580:
+               /* 82580 uses the same table-based approach as 82576 but has
+                  fewer entries; as a result we carry over for queues greater
+                  than 4. */
+               if (rx_queue > IGB_N0_QUEUE) {
+                       index = (rx_queue >> 1);
+                       ivar = array_rd32(E1000_IVAR0, index);
+                       if (rx_queue & 0x1) {
+                               /* vector goes into third byte of register */
+                               ivar = ivar & 0xFF00FFFF;
+                               ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+                       } else {
+                               /* vector goes into low byte of register */
+                               ivar = ivar & 0xFFFFFF00;
+                               ivar |= msix_vector | E1000_IVAR_VALID;
+                       }
+                       array_wr32(E1000_IVAR0, index, ivar);
+               }
+               if (tx_queue > IGB_N0_QUEUE) {
+                       index = (tx_queue >> 1);
+                       ivar = array_rd32(E1000_IVAR0, index);
+                       if (tx_queue & 0x1) {
+                               /* vector goes into high byte of register */
+                               ivar = ivar & 0x00FFFFFF;
+                               ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+                       } else {
+                               /* vector goes into second byte of register */
+                               ivar = ivar & 0xFFFF00FF;
+                               ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+                       }
+                       array_wr32(E1000_IVAR0, index, ivar);
+               }
+               q_vector->eims_value = 1 << msix_vector;
                break;
        default:
                BUG();
                break;
        }
+
+       /* add q_vector eims value to global eims_enable_mask */
+       adapter->eims_enable_mask |= q_vector->eims_value;
+
+       /* configure q_vector to set itr on first interrupt */
+       q_vector->set_itr = 1;
 }
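
The 82580 branch above packs four 8-bit vector entries into each 32-bit
IVAR0 register, one rx/tx queue pair per register.  A sketch of the layout,
derived from the shifts in the code (for illustration):

    /*
     * IVAR0[i]: byte 0 = rx queue 2i      byte 1 = tx queue 2i
     *           byte 2 = rx queue 2i + 1  byte 3 = tx queue 2i + 1
     *
     * e.g. mapping rx queue 3 to MSI-X vector 5:
     *   index = 3 >> 1 = 1;
     *   ivar  = (ivar & 0xFF00FFFF) | ((5 | E1000_IVAR_VALID) << 16);
     */
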
 
 /**
@@ -365,43 +526,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
 
        adapter->eims_enable_mask = 0;
-       if (hw->mac.type == e1000_82576)
-               /* Turn on MSI-X capability first, or our settings
-                * won't stick.  And it will take days to debug. */
-               wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
-                                  E1000_GPIE_PBA | E1000_GPIE_EIAME | 
-                                  E1000_GPIE_NSICR);
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct igb_ring *tx_ring = &adapter->tx_ring[i];
-               igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
-               adapter->eims_enable_mask |= tx_ring->eims_value;
-               if (tx_ring->itr_val)
-                       writel(tx_ring->itr_val,
-                              hw->hw_addr + tx_ring->itr_register);
-               else
-                       writel(1, hw->hw_addr + tx_ring->itr_register);
-       }
-
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *rx_ring = &adapter->rx_ring[i];
-               rx_ring->buddy = NULL;
-               igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
-               adapter->eims_enable_mask |= rx_ring->eims_value;
-               if (rx_ring->itr_val)
-                       writel(rx_ring->itr_val,
-                              hw->hw_addr + rx_ring->itr_register);
-               else
-                       writel(1, hw->hw_addr + rx_ring->itr_register);
-       }
-
 
        /* set vector for other causes, i.e. link changes */
        switch (hw->mac.type) {
        case e1000_82575:
-               array_wr32(E1000_MSIXBM(0), vector++,
-                                     E1000_EIMS_OTHER);
-
                tmp = rd32(E1000_CTRL_EXT);
                /* enable MSI-X PBA support*/
                tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -411,22 +539,38 @@ static void igb_configure_msix(struct igb_adapter *adapter)
                tmp |= E1000_CTRL_EXT_IRCA;
 
                wr32(E1000_CTRL_EXT, tmp);
-               adapter->eims_enable_mask |= E1000_EIMS_OTHER;
+
+               /* enable msix_other interrupt */
+               array_wr32(E1000_MSIXBM(0), vector++,
+                                     E1000_EIMS_OTHER);
                adapter->eims_other = E1000_EIMS_OTHER;
 
                break;
 
        case e1000_82576:
+       case e1000_82580:
+               /* Turn on MSI-X capability first, or our settings
+                * won't stick.  And it will take days to debug. */
+               wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+                               E1000_GPIE_PBA | E1000_GPIE_EIAME |
+                               E1000_GPIE_NSICR);
+
+               /* enable msix_other interrupt */
+               adapter->eims_other = 1 << vector;
                tmp = (vector++ | E1000_IVAR_VALID) << 8;
-               wr32(E1000_IVAR_MISC, tmp);
 
-               adapter->eims_enable_mask = (1 << (vector)) - 1;
-               adapter->eims_other = 1 << (vector - 1);
+               wr32(E1000_IVAR_MISC, tmp);
                break;
        default:
                /* do nothing, since nothing else supports MSI-X */
                break;
        } /* switch (hw->mac.type) */
+
+       adapter->eims_enable_mask |= adapter->eims_other;
+
+       for (i = 0; i < adapter->num_q_vectors; i++)
+               igb_assign_vector(adapter->q_vector[i], vector++);
+
        wrfl();
 }
 
@@ -439,46 +583,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 static int igb_request_msix(struct igb_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct e1000_hw *hw = &adapter->hw;
        int i, err = 0, vector = 0;
 
-       vector = 0;
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct igb_ring *ring = &(adapter->tx_ring[i]);
-               sprintf(ring->name, "%s-tx%d", netdev->name, i);
-               err = request_irq(adapter->msix_entries[vector].vector,
-                                 &igb_msix_tx, 0, ring->name,
-                                 &(adapter->tx_ring[i]));
-               if (err)
-                       goto out;
-               ring->itr_register = E1000_EITR(0) + (vector << 2);
-               ring->itr_val = 976; /* ~4000 ints/sec */
-               vector++;
-       }
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *ring = &(adapter->rx_ring[i]);
-               if (strlen(netdev->name) < (IFNAMSIZ - 5))
-                       sprintf(ring->name, "%s-rx%d", netdev->name, i);
+       err = request_irq(adapter->msix_entries[vector].vector,
+                         igb_msix_other, 0, netdev->name, adapter);
+       if (err)
+               goto out;
+       vector++;
+
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+               q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+               if (q_vector->rx_ring && q_vector->tx_ring)
+                       sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+                               q_vector->rx_ring->queue_index);
+               else if (q_vector->tx_ring)
+                       sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+                               q_vector->tx_ring->queue_index);
+               else if (q_vector->rx_ring)
+                       sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+                               q_vector->rx_ring->queue_index);
                else
-                       memcpy(ring->name, netdev->name, IFNAMSIZ);
+                       sprintf(q_vector->name, "%s-unused", netdev->name);
+
                err = request_irq(adapter->msix_entries[vector].vector,
-                                 &igb_msix_rx, 0, ring->name,
-                                 &(adapter->rx_ring[i]));
+                                 igb_msix_ring, 0, q_vector->name,
+                                 q_vector);
                if (err)
                        goto out;
-               ring->itr_register = E1000_EITR(0) + (vector << 2);
-               ring->itr_val = adapter->itr;
-               /* overwrite the poll routine for MSIX, we've already done
-                * netif_napi_add */
-               ring->napi.poll = &igb_clean_rx_ring_msix;
                vector++;
        }
 
-       err = request_irq(adapter->msix_entries[vector].vector,
-                         &igb_msix_other, 0, netdev->name, netdev);
-       if (err)
-               goto out;
-
        igb_configure_msix(adapter);
        return 0;
 out:
@@ -491,11 +629,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
-       } else if (adapter->flags & IGB_FLAG_HAS_MSI)
+       } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                pci_disable_msi(adapter->pdev);
-       return;
+       }
+}
+
+/**
+ * igb_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition, if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vectors(struct igb_adapter *adapter)
+{
+       int v_idx;
+
+       for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+               adapter->q_vector[v_idx] = NULL;
+               netif_napi_del(&q_vector->napi);
+               kfree(q_vector);
+       }
+       adapter->num_q_vectors = 0;
 }
 
+/**
+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *
+ * This function resets the device so that it has 0 rx queues, tx queues, and
+ * MSI-X interrupts allocated.
+ */
+static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+{
+       igb_free_queues(adapter);
+       igb_free_q_vectors(adapter);
+       igb_reset_interrupt_capability(adapter);
+}
 
 /**
  * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -508,7 +679,22 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
        int err;
        int numvecs, i;
 
-       numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
+       /* Number of supported queues. */
+       adapter->num_rx_queues = adapter->rss_queues;
+       adapter->num_tx_queues = adapter->rss_queues;
+
+       /* start with one vector for every rx queue */
+       numvecs = adapter->num_rx_queues;
+
+       /* if tx handler is separate, add 1 for every tx queue */
+       if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+               numvecs += adapter->num_tx_queues;
+
+       /* store the number of vectors reserved for queues */
+       adapter->num_q_vectors = numvecs;
+
+       /* add 1 vector for link status interrupts */
+       numvecs++;
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
@@ -527,8 +713,27 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 
        /* If we can't do MSI-X, try MSI */
 msi_only:
+#ifdef CONFIG_PCI_IOV
+       /* disable SR-IOV for non-MSI-X configurations */
+       if (adapter->vf_data) {
+               struct e1000_hw *hw = &adapter->hw;
+               /* disable iov and allow time for transactions to clear */
+               pci_disable_sriov(adapter->pdev);
+               msleep(500);
+
+               kfree(adapter->vf_data);
+               adapter->vf_data = NULL;
+               wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+               msleep(100);
+               dev_info(&adapter->pdev->dev, "IOV Disabled\n");
+       }
+#endif
+       adapter->vfs_allocated_count = 0;
+       adapter->rss_queues = 1;
+       adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
+       adapter->num_q_vectors = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
@@ -538,6 +743,139 @@ out:
 }
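
A worked instance of the vector budget computed in
igb_set_interrupt_capability above (assumed queue counts, for illustration):
with rss_queues = 4 and IGB_FLAG_QUEUE_PAIRS clear,
numvecs = 4 rx + 4 tx + 1 link = 9 MSI-X vectors and num_q_vectors = 8;
with queue pairs enabled the same setup needs only 4 + 1 = 5 vectors.
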
 
 /**
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+{
+       struct igb_q_vector *q_vector;
+       struct e1000_hw *hw = &adapter->hw;
+       int v_idx;
+
+       for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+               q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
+               if (!q_vector)
+                       goto err_out;
+               q_vector->adapter = adapter;
+               q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
+               q_vector->itr_val = IGB_START_ITR;
+               netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
+               adapter->q_vector[v_idx] = q_vector;
+       }
+       return 0;
+
+err_out:
+       while (v_idx) {
+               v_idx--;
+               q_vector = adapter->q_vector[v_idx];
+               netif_napi_del(&q_vector->napi);
+               kfree(q_vector);
+               adapter->q_vector[v_idx] = NULL;
+       }
+       return -ENOMEM;
+}
+
+static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
+                                      int ring_idx, int v_idx)
+{
+       struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+       q_vector->rx_ring = adapter->rx_ring[ring_idx];
+       q_vector->rx_ring->q_vector = q_vector;
+       q_vector->itr_val = adapter->rx_itr_setting;
+       if (q_vector->itr_val && q_vector->itr_val <= 3)
+               q_vector->itr_val = IGB_START_ITR;
+}
+
+static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
+                                      int ring_idx, int v_idx)
+{
+       struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+       q_vector->tx_ring = adapter->tx_ring[ring_idx];
+       q_vector->tx_ring->q_vector = q_vector;
+       q_vector->itr_val = adapter->tx_itr_setting;
+       if (q_vector->itr_val && q_vector->itr_val <= 3)
+               q_vector->itr_val = IGB_START_ITR;
+}
+
+/**
+ * igb_map_ring_to_vector - maps allocated queues to vectors
+ *
+ * This function maps the recently allocated queues to vectors.
+ **/
+static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+{
+       int i;
+       int v_idx = 0;
+
+       if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
+           (adapter->num_q_vectors < adapter->num_tx_queues))
+               return -ENOMEM;
+
+       if (adapter->num_q_vectors >=
+           (adapter->num_rx_queues + adapter->num_tx_queues)) {
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+       } else {
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       if (i < adapter->num_tx_queues)
+                               igb_map_tx_ring_to_vector(adapter, i, v_idx);
+                       igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+               }
+               for (; i < adapter->num_tx_queues; i++)
+                       igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+       }
+       return 0;
+}
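
For illustration, the two mapping regimes above with 4 rx and 4 tx queues
(assumed counts):

    /*
     * 8+ q_vectors (one per queue):
     *   vectors 0..3 -> rx 0..3,  vectors 4..7 -> tx 0..3
     * 4 q_vectors (paired):
     *   vector i -> rx i and tx i
     */
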
+
+/**
+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ **/
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       int err;
+
+       igb_set_interrupt_capability(adapter);
+
+       err = igb_alloc_q_vectors(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+               goto err_alloc_q_vectors;
+       }
+
+       err = igb_alloc_queues(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+               goto err_alloc_queues;
+       }
+
+       err = igb_map_ring_to_vector(adapter);
+       if (err) {
+               dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
+               goto err_map_queues;
+       }
+
+       return 0;
+err_map_queues:
+       igb_free_queues(adapter);
+err_alloc_queues:
+       igb_free_q_vectors(adapter);
+err_alloc_q_vectors:
+       igb_reset_interrupt_capability(adapter);
+       return err;
+}
+
+/**
  * igb_request_irq - initialize interrupts
  *
  * Attempts to configure interrupts using the best available
@@ -546,7 +884,7 @@ out:
 static int igb_request_irq(struct igb_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       struct e1000_hw *hw = &adapter->hw;
+       struct pci_dev *pdev = adapter->pdev;
        int err = 0;
 
        if (adapter->msix_entries) {
@@ -554,39 +892,46 @@ static int igb_request_irq(struct igb_adapter *adapter)
                if (!err)
                        goto request_done;
                /* fall back to MSI */
-               igb_reset_interrupt_capability(adapter);
+               igb_clear_interrupt_scheme(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
+               adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 1;
-               igb_alloc_queues(adapter);
-       } else {
-               switch (hw->mac.type) {
-               case e1000_82575:
-                       wr32(E1000_MSIXBM(0),
-                            (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
-                       break;
-               case e1000_82576:
-                       wr32(E1000_IVAR0, E1000_IVAR_VALID);
-                       break;
-               default:
-                       break;
+               adapter->num_q_vectors = 1;
+               err = igb_alloc_q_vectors(adapter);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "Unable to allocate memory for vectors\n");
+                       goto request_done;
                }
+               err = igb_alloc_queues(adapter);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "Unable to allocate memory for queues\n");
+                       igb_free_q_vectors(adapter);
+                       goto request_done;
+               }
+               igb_setup_all_tx_resources(adapter);
+               igb_setup_all_rx_resources(adapter);
+       } else {
+               igb_assign_vector(adapter->q_vector[0], 0);
        }
 
        if (adapter->flags & IGB_FLAG_HAS_MSI) {
-               err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
-                                 netdev->name, netdev);
+               err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
+                                 netdev->name, adapter);
                if (!err)
                        goto request_done;
+
                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }
 
-       err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
-                         netdev->name, netdev);
+       err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
+                         netdev->name, adapter);
 
        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -598,23 +943,19 @@ request_done:
 
 static void igb_free_irq(struct igb_adapter *adapter)
 {
-       struct net_device *netdev = adapter->netdev;
-
        if (adapter->msix_entries) {
                int vector = 0, i;
 
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       free_irq(adapter->msix_entries[vector++].vector,
-                               &(adapter->tx_ring[i]));
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       free_irq(adapter->msix_entries[vector++].vector,
-                               &(adapter->rx_ring[i]));
+               free_irq(adapter->msix_entries[vector++].vector, adapter);
 
-               free_irq(adapter->msix_entries[vector++].vector, netdev);
-               return;
+               for (i = 0; i < adapter->num_q_vectors; i++) {
+                       struct igb_q_vector *q_vector = adapter->q_vector[i];
+                       free_irq(adapter->msix_entries[vector++].vector,
+                                q_vector);
+               }
+       } else {
+               free_irq(adapter->pdev->irq, adapter);
        }
-
-       free_irq(adapter->pdev->irq, netdev);
 }
 
 /**
@@ -625,10 +966,17 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
 
+       /*
+        * we need to be careful when disabling interrupts.  The VFs are also
+        * mapped into these registers, and clearing the bits can cause
+        * issues for the VF drivers, so we only clear the bits we set.
+        */
        if (adapter->msix_entries) {
-               wr32(E1000_EIAM, 0);
-               wr32(E1000_EIMC, ~0);
-               wr32(E1000_EIAC, 0);
+               u32 regval = rd32(E1000_EIAM);
+               wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
+               wr32(E1000_EIMC, adapter->eims_enable_mask);
+               regval = rd32(E1000_EIAC);
+               wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
        }
 
        wr32(E1000_IAM, 0);
@@ -646,36 +994,47 @@ static void igb_irq_enable(struct igb_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
 
        if (adapter->msix_entries) {
-               wr32(E1000_EIAC, adapter->eims_enable_mask);
-               wr32(E1000_EIAM, adapter->eims_enable_mask);
+               u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
+               u32 regval = rd32(E1000_EIAC);
+               wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
+               regval = rd32(E1000_EIAM);
+               wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
                wr32(E1000_EIMS, adapter->eims_enable_mask);
-               wr32(E1000_IMS, E1000_IMS_LSC);
+               if (adapter->vfs_allocated_count) {
+                       wr32(E1000_MBVFIMR, 0xFF);
+                       ims |= E1000_IMS_VMMB;
+               }
+               if (adapter->hw.mac.type == e1000_82580)
+                       ims |= E1000_IMS_DRSTA;
+
+               wr32(E1000_IMS, ims);
        } else {
-               wr32(E1000_IMS, IMS_ENABLE_MASK);
-               wr32(E1000_IAM, IMS_ENABLE_MASK);
+               wr32(E1000_IMS, IMS_ENABLE_MASK |
+                               E1000_IMS_DRSTA);
+               wr32(E1000_IAM, IMS_ENABLE_MASK |
+                               E1000_IMS_DRSTA);
        }
 }
 
 static void igb_update_mng_vlan(struct igb_adapter *adapter)
 {
-       struct net_device *netdev = adapter->netdev;
+       struct e1000_hw *hw = &adapter->hw;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;
-       if (adapter->vlgrp) {
-               if (!vlan_group_get_device(adapter->vlgrp, vid)) {
-                       if (adapter->hw.mng_cookie.status &
-                               E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
-                               igb_vlan_rx_add_vid(netdev, vid);
-                               adapter->mng_vlan_id = vid;
-                       } else
-                               adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
 
-                       if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
-                                       (vid != old_vid) &&
-                           !vlan_group_get_device(adapter->vlgrp, old_vid))
-                               igb_vlan_rx_kill_vid(netdev, old_vid);
-               } else
-                       adapter->mng_vlan_id = vid;
+       if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+               /* add VID to filter table */
+               igb_vfta_set(hw, vid, true);
+               adapter->mng_vlan_id = vid;
+       } else {
+               adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+       }
+
+       if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+           (vid != old_vid) &&
+           !vlan_group_get_device(adapter->vlgrp, old_vid)) {
+               /* remove VID from filter table */
+               igb_vfta_set(hw, old_vid, false);
        }
 }
 
@@ -699,7 +1058,6 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
                        ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 }
 
-
 /**
  * igb_get_hw_control - get control of the h/w from f/w
  * @adapter: address of board private structure
@@ -730,34 +1088,59 @@ static void igb_configure(struct igb_adapter *adapter)
        int i;
 
        igb_get_hw_control(adapter);
-       igb_set_multi(netdev);
+       igb_set_rx_mode(netdev);
 
        igb_restore_vlan(adapter);
 
-       igb_configure_tx(adapter);
+       igb_setup_tctl(adapter);
+       igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);
+
+       igb_configure_tx(adapter);
        igb_configure_rx(adapter);
 
        igb_rx_fifo_flush_82575(&adapter->hw);
 
-       /* call IGB_DESC_UNUSED which always leaves
+       /* call igb_desc_unused which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *ring = &adapter->rx_ring[i];
-               igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring));
+               struct igb_ring *ring = adapter->rx_ring[i];
+               igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }
 
 
        adapter->tx_queue_len = netdev->tx_queue_len;
 }
 
+/**
+ * igb_power_up_link - Power up the phy/serdes link
+ * @adapter: address of board private structure
+ **/
+void igb_power_up_link(struct igb_adapter *adapter)
+{
+       if (adapter->hw.phy.media_type == e1000_media_type_copper)
+               igb_power_up_phy_copper(&adapter->hw);
+       else
+               igb_power_up_serdes_link_82575(&adapter->hw);
+}
+
+/**
+ * igb_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igb_power_down_link(struct igb_adapter *adapter)
+{
+       if (adapter->hw.phy.media_type == e1000_media_type_copper)
+               igb_power_down_phy_copper_82575(&adapter->hw);
+       else
+               igb_shutdown_serdes_link_82575(&adapter->hw);
+}
 
 /**
  * igb_up - Open the interface and prepare it to handle traffic
  * @adapter: board private structure
  **/
-
 int igb_up(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
@@ -768,24 +1151,39 @@ int igb_up(struct igb_adapter *adapter)
 
        clear_bit(__IGB_DOWN, &adapter->state);
 
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               napi_enable(&adapter->rx_ring[i].napi);
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+               napi_enable(&q_vector->napi);
+       }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
+       else
+               igb_assign_vector(adapter->q_vector[0], 0);
 
        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);
 
-       /* Fire a link change interrupt to start the watchdog. */
-       wr32(E1000_ICS, E1000_ICS_LSC);
+       /* notify VFs that reset has been completed */
+       if (adapter->vfs_allocated_count) {
+               u32 reg_data = rd32(E1000_CTRL_EXT);
+               reg_data |= E1000_CTRL_EXT_PFRSTD;
+               wr32(E1000_CTRL_EXT, reg_data);
+       }
+
+       netif_tx_start_all_queues(adapter->netdev);
+
+       /* start the watchdog. */
+       hw->mac.get_link_status = 1;
+       schedule_work(&adapter->watchdog_task);
+
        return 0;
 }
 
 void igb_down(struct igb_adapter *adapter)
 {
-       struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
+       struct e1000_hw *hw = &adapter->hw;
        u32 tctl, rctl;
        int i;
 
@@ -808,8 +1206,10 @@ void igb_down(struct igb_adapter *adapter)
        wrfl();
        msleep(10);
 
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               napi_disable(&adapter->rx_ring[i].napi);
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+               napi_disable(&q_vector->napi);
+       }
 
        igb_irq_disable(adapter);
 
@@ -818,6 +1218,10 @@ void igb_down(struct igb_adapter *adapter)
 
        netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);
+
+       /* record the stats before reset */
+       igb_update_stats(adapter);
+
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
 
@@ -825,6 +1229,11 @@ void igb_down(struct igb_adapter *adapter)
                igb_reset(adapter);
        igb_clean_all_tx_rings(adapter);
        igb_clean_all_rx_rings(adapter);
+#ifdef CONFIG_IGB_DCA
+
+       /* since we reset the hardware, DCA settings were cleared */
+       igb_setup_dca(adapter);
+#endif
 }
 
 void igb_reinit_locked(struct igb_adapter *adapter)
@@ -839,6 +1248,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 
 void igb_reset(struct igb_adapter *adapter)
 {
+       struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_mac_info *mac = &hw->mac;
        struct e1000_fc_info *fc = &hw->fc;
@@ -848,11 +1258,19 @@ void igb_reset(struct igb_adapter *adapter)
        /* Repartition Pba for greater than 9k mtu
         * To take effect CTRL.RST is required.
         */
-       if (mac->type != e1000_82576) {
-       pba = E1000_PBA_34K;
-       }
-       else {
-               pba = E1000_PBA_64K;
+       switch (mac->type) {
+       case e1000_82580:
+               pba = rd32(E1000_RXPBS);
+               pba = igb_rxpbs_adjust_82580(pba);
+               break;
+       case e1000_82576:
+               pba = rd32(E1000_RXPBS);
+               pba &= E1000_RXPBS_SIZE_MASK_82576;
+               break;
+       case e1000_82575:
+       default:
+               pba = E1000_PBA_34K;
+               break;
        }
 
        if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
@@ -874,7 +1292,7 @@ void igb_reset(struct igb_adapter *adapter)
                /* the tx fifo also stores 16 bytes of information about the tx
                 * but don't include ethernet FCS because hardware appends it */
                min_tx_space = (adapter->max_frame_size +
-                               sizeof(struct e1000_tx_desc) -
+                               sizeof(union e1000_adv_tx_desc) -
                                ETH_FCS_LEN) * 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
@@ -907,55 +1325,56 @@ void igb_reset(struct igb_adapter *adapter)
        hwm = min(((pba << 10) * 9 / 10),
                        ((pba << 10) - 2 * adapter->max_frame_size));
 
-       if (mac->type < e1000_82576) {
-               fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
-               fc->low_water = fc->high_water - 8;
-       } else {
-               fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
-               fc->low_water = fc->high_water - 16;
-       }
+       fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
+       fc->low_water = fc->high_water - 16;
        fc->pause_time = 0xFFFF;
        fc->send_xon = 1;
-       fc->type = fc->original_type;
+       fc->current_mode = fc->requested_mode;
+
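
A worked instance of the watermark math above, assuming the 82575 default
PBA of 34 KB and a 1522-byte max frame (illustrative numbers):

    /*
     * hwm = min((34 << 10) * 9 / 10, (34 << 10) - 2 * 1522)
     *     = min(31334, 31772) = 31334
     * fc->high_water = 31334 & 0xFFF0 = 31328
     * fc->low_water  = 31328 - 16     = 31312
     */
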
+       /* disable receive for all VFs and wait one second */
+       if (adapter->vfs_allocated_count) {
+               int i;
+               for (i = 0 ; i < adapter->vfs_allocated_count; i++)
+                       adapter->vf_data[i].flags = 0;
+
+               /* ping all the active vfs to let them know we are going down */
+               igb_ping_all_vfs(adapter);
+
+               /* disable transmits and receives */
+               wr32(E1000_VFRE, 0);
+               wr32(E1000_VFTE, 0);
+       }
 
        /* Allow time for pending master requests to run */
-       adapter->hw.mac.ops.reset_hw(&adapter->hw);
+       hw->mac.ops.reset_hw(hw);
        wr32(E1000_WUC, 0);
 
-       if (adapter->hw.mac.ops.init_hw(&adapter->hw))
-               dev_err(&adapter->pdev->dev, "Hardware Error\n");
+       if (hw->mac.ops.init_hw(hw))
+               dev_err(&pdev->dev, "Hardware Error\n");
+
+       if (hw->mac.type == e1000_82580) {
+               u32 reg = rd32(E1000_PCIEMISC);
+               wr32(E1000_PCIEMISC,
+                               reg & ~E1000_PCIEMISC_LX_DECISION);
+       }
+       if (!netif_running(adapter->netdev))
+               igb_power_down_link(adapter);
 
        igb_update_mng_vlan(adapter);
 
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
-       igb_reset_adaptive(&adapter->hw);
-       if (adapter->hw.phy.ops.get_phy_info)
-               adapter->hw.phy.ops.get_phy_info(&adapter->hw);
-}
-
-/**
- * igb_is_need_ioport - determine if an adapter needs ioport resources or not
- * @pdev: PCI device information struct
- *
- * Returns true if an adapter needs ioport resources
- **/
-static int igb_is_need_ioport(struct pci_dev *pdev)
-{
-       switch (pdev->device) {
-       /* Currently there are no adapters that need ioport resources */
-       default:
-               return false;
-       }
+       igb_get_phy_info(hw);
 }
 
 static const struct net_device_ops igb_netdev_ops = {
-       .ndo_open               = igb_open,
+       .ndo_open               = igb_open,
        .ndo_stop               = igb_close,
        .ndo_start_xmit         = igb_xmit_frame_adv,
        .ndo_get_stats          = igb_get_stats,
-       .ndo_set_multicast_list = igb_set_multi,
+       .ndo_set_rx_mode        = igb_set_rx_mode,
+       .ndo_set_multicast_list = igb_set_rx_mode,
        .ndo_set_mac_address    = igb_set_mac,
        .ndo_change_mtu         = igb_change_mtu,
        .ndo_do_ioctl           = igb_ioctl,
@@ -964,6 +1383,10 @@ static const struct net_device_ops igb_netdev_ops = {
        .ndo_vlan_rx_register   = igb_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = igb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = igb_vlan_rx_kill_vid,
+       .ndo_set_vf_mac         = igb_ndo_set_vf_mac,
+       .ndo_set_vf_vlan        = igb_ndo_set_vf_vlan,
+       .ndo_set_vf_tx_rate     = igb_ndo_set_vf_bw,
+       .ndo_get_vf_config      = igb_ndo_get_vf_config,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = igb_netpoll,
 #endif
@@ -986,37 +1409,28 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        struct net_device *netdev;
        struct igb_adapter *adapter;
        struct e1000_hw *hw;
-       struct pci_dev *us_dev;
+       u16 eeprom_data = 0;
+       static int global_quad_port_a; /* global quad port a indication */
        const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
        unsigned long mmio_start, mmio_len;
-       int i, err, pci_using_dac, pos;
-       u16 eeprom_data = 0, state = 0;
+       int err, pci_using_dac;
        u16 eeprom_apme_mask = IGB_EEPROM_APME;
        u32 part_num;
-       int bars, need_ioport;
 
-       /* do not allocate ioport bars when not needed */
-       need_ioport = igb_is_need_ioport(pdev);
-       if (need_ioport) {
-               bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
-               err = pci_enable_device(pdev);
-       } else {
-               bars = pci_select_bars(pdev, IORESOURCE_MEM);
-               err = pci_enable_device_mem(pdev);
-       }
+       err = pci_enable_device_mem(pdev);
        if (err)
                return err;
 
        pci_using_dac = 0;
-       err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!err) {
-               err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (!err)
                        pci_using_dac = 1;
        } else {
-               err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
-                       err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+                       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
@@ -1025,36 +1439,20 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                }
        }
 
-       /* 82575 requires that the pci-e link partner disable the L0s state */
-       switch (pdev->device) {
-       case E1000_DEV_ID_82575EB_COPPER:
-       case E1000_DEV_ID_82575EB_FIBER_SERDES:
-       case E1000_DEV_ID_82575GB_QUAD_COPPER:
-               us_dev = pdev->bus->self;
-               pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
-               if (pos) {
-                       pci_read_config_word(us_dev, pos + PCI_EXP_LNKCTL,
-                                            &state);
-                       state &= ~PCIE_LINK_STATE_L0S;
-                       pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL,
-                                             state);
-                       dev_info(&pdev->dev,
-                                "Disabling ASPM L0s upstream switch port %s\n",
-                                pci_name(us_dev));
-               }
-       default:
-               break;
-       }
-
-       err = pci_request_selected_regions(pdev, bars, igb_driver_name);
+       err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+                                          IORESOURCE_MEM),
+                                          igb_driver_name);
        if (err)
                goto err_pci_reg;
 
+       pci_enable_pcie_error_reporting(pdev);
+
        pci_set_master(pdev);
        pci_save_state(pdev);
 
        err = -ENOMEM;
-       netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
+       netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
+                                  IGB_ABS_MAX_TX_QUEUES);
        if (!netdev)
                goto err_alloc_etherdev;
 
@@ -1067,15 +1465,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
-       adapter->bars = bars;
-       adapter->need_ioport = need_ioport;
 
        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
 
        err = -EIO;
-       adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
-       if (!adapter->hw.hw_addr)
+       hw->hw_addr = ioremap(mmio_start, mmio_len);
+       if (!hw->hw_addr)
                goto err_ioremap;
 
        netdev->netdev_ops = &igb_netdev_ops;
@@ -1094,8 +1490,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
 
-       /* setup the private structure */
-       hw->back = adapter;
        /* Copy the default MAC, PHY and NVM function pointers */
        memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
        memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -1103,27 +1497,16 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        /* Initialize skew-specific constants */
        err = ei->get_invariants(hw);
        if (err)
-               goto err_hw_init;
+               goto err_sw_init;
 
+       /* setup the private structure */
        err = igb_sw_init(adapter);
        if (err)
                goto err_sw_init;
 
        igb_get_bus_info_pcie(hw);
 
-       /* set flags */
-       switch (hw->mac.type) {
-       case e1000_82576:
-       case e1000_82575:
-               adapter->flags |= IGB_FLAG_HAS_DCA;
-               adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
-               break;
-       default:
-               break;
-       }
-
        hw->phy.autoneg_wait_to_complete = false;
-       hw->mac.adaptive_ifs = true;
 
        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
@@ -1137,28 +1520,29 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                        "PHY reset is blocked due to SOL/IDER session.\n");
 
        netdev->features = NETIF_F_SG |
-                          NETIF_F_HW_CSUM |
+                          NETIF_F_IP_CSUM |
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;
 
+       netdev->features |= NETIF_F_IPV6_CSUM;
        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;
-
-#ifdef CONFIG_IGB_LRO
-       netdev->features |= NETIF_F_LRO;
-#endif
+       netdev->features |= NETIF_F_GRO;
 
        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
-       netdev->vlan_features |= NETIF_F_HW_CSUM;
+       netdev->vlan_features |= NETIF_F_IP_CSUM;
+       netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;
 
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
-       netdev->features |= NETIF_F_LLTX;
-       adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
+       if (hw->mac.type >= e1000_82576)
+               netdev->features |= NETIF_F_SCTP_CSUM;
+
+       adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
 
        /* before reading the NVM, reset the controller to put the device in a
         * known good starting state */
@@ -1184,47 +1568,36 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                goto err_eeprom;
        }
 
-       init_timer(&adapter->watchdog_timer);
-       adapter->watchdog_timer.function = &igb_watchdog;
-       adapter->watchdog_timer.data = (unsigned long) adapter;
-
-       init_timer(&adapter->phy_info_timer);
-       adapter->phy_info_timer.function = &igb_update_phy_info;
-       adapter->phy_info_timer.data = (unsigned long) adapter;
+       setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+                   (unsigned long) adapter);
+       setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+                   (unsigned long) adapter);
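
The setup_timer() conversion folds each three-line init_timer() sequence into a single call. A self-contained userspace stand-in showing the equivalence (the struct and helpers here are simplified stand-ins, not the kernel API):

    #include <stdio.h>

    /* simplified stand-in for struct timer_list, just to show the
     * shape of the refactor */
    struct timer_list {
            void (*function)(unsigned long);
            unsigned long data;
    };

    static void init_timer(struct timer_list *t)
    {
            (void)t; /* the real helper also sets up list/debug state */
    }

    /* setup_timer(t, fn, data) == the two assignments plus init_timer() */
    static void setup_timer(struct timer_list *t,
                            void (*fn)(unsigned long), unsigned long data)
    {
            t->function = fn;
            t->data = data;
            init_timer(t);
    }

    static void watchdog(unsigned long data)
    {
            printf("timer fired, data=%lu\n", data);
    }

    int main(void)
    {
            struct timer_list t;

            setup_timer(&t, watchdog, 42);
            t.function(t.data); /* simulate expiry */
            return 0;
    }
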
 
        INIT_WORK(&adapter->reset_task, igb_reset_task);
        INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
 
-       /* Initialize link & ring properties that are user-changeable */
-       adapter->tx_ring->count = 256;
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               adapter->tx_ring[i].count = adapter->tx_ring->count;
-       adapter->rx_ring->count = 256;
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               adapter->rx_ring[i].count = adapter->rx_ring->count;
-
+       /* Initialize link properties that are user-changeable */
        adapter->fc_autoneg = true;
        hw->mac.autoneg = true;
        hw->phy.autoneg_advertised = 0x2f;
 
-       hw->fc.original_type = e1000_fc_default;
-       hw->fc.type = e1000_fc_default;
-
-       adapter->itr_setting = 3;
-       adapter->itr = IGB_START_ITR;
+       hw->fc.requested_mode = e1000_fc_default;
+       hw->fc.current_mode = e1000_fc_default;
 
        igb_validate_mdi_setting(hw);
 
-       adapter->rx_csum = 1;
-
        /* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
         * enable the ACPI Magic Packet filter
         */
 
-       if (hw->bus.func == 0 ||
-           hw->device_id == E1000_DEV_ID_82575EB_COPPER)
-               hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1,
-                                    &eeprom_data);
+       if (hw->bus.func == 0)
+               hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+       else if (hw->mac.type == e1000_82580)
+               hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+                                NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+                                &eeprom_data);
+       else if (hw->bus.func == 1)
+               hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
 
        if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;
@@ -1244,6 +1617,16 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
+       case E1000_DEV_ID_82576_QUAD_COPPER:
+               /* if quad port adapter, disable WoL on all but port A */
+               if (global_quad_port_a != 0)
+                       adapter->eeprom_wol = 0;
+               else
+                       adapter->flags |= IGB_FLAG_QUAD_PORT_A;
+               /* Reset for multiple quad port adapters */
+               if (++global_quad_port_a == 4)
+                       global_quad_port_a = 0;
+               break;
        }
 
        /* initialize the wol settings based on the eeprom settings */
@@ -1257,36 +1640,32 @@ static int __devinit igb_probe(struct pci_dev *pdev,
         * driver. */
        igb_get_hw_control(adapter);
 
-       /* tell the stack to leave us alone until igb_open() is called */
-       netif_carrier_off(netdev);
-       netif_tx_stop_all_queues(netdev);
-
        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;
 
+       /* carrier off reporting is important to ethtool even BEFORE open */
+       netif_carrier_off(netdev);
+
 #ifdef CONFIG_IGB_DCA
-       if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
-           (dca_add_requester(&pdev->dev) == 0)) {
+       if (dca_add_requester(&pdev->dev) == 0) {
                adapter->flags |= IGB_FLAG_DCA_ENABLED;
                dev_info(&pdev->dev, "DCA enabled\n");
-               /* Always use CB2 mode, difference is masked
-                * in the CB driver. */
-               wr32(E1000_DCA_CTRL, 2);
                igb_setup_dca(adapter);
        }
-#endif
 
+#endif
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
                 netdev->name,
-                ((hw->bus.speed == e1000_bus_speed_2500)
-                 ? "2.5Gb/s" : "unknown"),
-                ((hw->bus.width == e1000_bus_width_pcie_x4)
-                 ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
-                 ? "Width x1" : "unknown"),
+                ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+                                                           "unknown"),
+                ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+                 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
+                 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+                  "unknown"),
                 netdev->dev_addr);
 
        igb_read_part_num(hw, &part_num);
@@ -1305,20 +1684,18 @@ err_register:
        igb_release_hw_control(adapter);
 err_eeprom:
        if (!igb_check_reset_block(hw))
-               hw->phy.ops.reset_phy(hw);
+               igb_reset_phy(hw);
 
        if (hw->flash_address)
                iounmap(hw->flash_address);
-
-       igb_remove_device(hw);
-       igb_free_queues(adapter);
 err_sw_init:
-err_hw_init:
+       igb_clear_interrupt_scheme(adapter);
        iounmap(hw->hw_addr);
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
-       pci_release_selected_regions(pdev, bars);
+       pci_release_selected_regions(pdev,
+                                    pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
        pci_disable_device(pdev);
@@ -1338,9 +1715,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_IGB_DCA
        struct e1000_hw *hw = &adapter->hw;
-#endif
 
        /* flush_scheduled_work() may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled  */
@@ -1355,7 +1730,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
                dev_info(&pdev->dev, "DCA disabled\n");
                dca_remove_requester(&pdev->dev);
                adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
-               wr32(E1000_DCA_CTRL, 1);
+               wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
        }
 #endif
 
@@ -1365,26 +1740,191 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
        unregister_netdev(netdev);
 
-       if (adapter->hw.phy.ops.reset_phy &&
-           !igb_check_reset_block(&adapter->hw))
-               adapter->hw.phy.ops.reset_phy(&adapter->hw);
+       igb_clear_interrupt_scheme(adapter);
 
-       igb_remove_device(&adapter->hw);
-       igb_reset_interrupt_capability(adapter);
+#ifdef CONFIG_PCI_IOV
+       /* reclaim resources allocated to VFs */
+       if (adapter->vf_data) {
+               /* disable iov and allow time for transactions to clear */
+               pci_disable_sriov(pdev);
+               msleep(500);
 
-       igb_free_queues(adapter);
+               kfree(adapter->vf_data);
+               adapter->vf_data = NULL;
+               wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+               msleep(100);
+               dev_info(&pdev->dev, "IOV Disabled\n");
+       }
+#endif
 
-       iounmap(adapter->hw.hw_addr);
-       if (adapter->hw.flash_address)
-               iounmap(adapter->hw.flash_address);
-       pci_release_selected_regions(pdev, adapter->bars);
+       iounmap(hw->hw_addr);
+       if (hw->flash_address)
+               iounmap(hw->flash_address);
+       pci_release_selected_regions(pdev,
+                                    pci_select_bars(pdev, IORESOURCE_MEM));
 
        free_netdev(netdev);
 
+       pci_disable_pcie_error_reporting(pdev);
+
        pci_disable_device(pdev);
 }
 
 /**
+ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
+ *
+ * This function initializes the vf specific data storage and then attempts to
+ * allocate the VFs.  The reason for ordering it this way is because it is much
+ * more expensive time-wise to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
+ **/
+static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+       struct pci_dev *pdev = adapter->pdev;
+
+       if (adapter->vfs_allocated_count > 7)
+               adapter->vfs_allocated_count = 7;
+
+       if (adapter->vfs_allocated_count) {
+               adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+                                          sizeof(struct vf_data_storage),
+                                          GFP_KERNEL);
+               /* if allocation failed then we do not support SR-IOV */
+               if (!adapter->vf_data) {
+                       adapter->vfs_allocated_count = 0;
+                       dev_err(&pdev->dev, "Unable to allocate memory for VF "
+                               "Data Storage\n");
+               }
+       }
+
+       if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
+               kfree(adapter->vf_data);
+               adapter->vf_data = NULL;
+#endif /* CONFIG_PCI_IOV */
+               adapter->vfs_allocated_count = 0;
+#ifdef CONFIG_PCI_IOV
+       } else {
+               unsigned char mac_addr[ETH_ALEN];
+               int i;
+               dev_info(&pdev->dev, "%d vfs allocated\n",
+                        adapter->vfs_allocated_count);
+               for (i = 0; i < adapter->vfs_allocated_count; i++) {
+                       random_ether_addr(mac_addr);
+                       igb_set_vf_mac(adapter, i, mac_addr);
+               }
+       }
+#endif /* CONFIG_PCI_IOV */
+}
+
+/**
+ * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
+ * @adapter: board private structure to initialize
+ *
+ * igb_init_hw_timer initializes the function pointer and values for the hw
+ * timer found in hardware.
+ **/
+static void igb_init_hw_timer(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
+       switch (hw->mac.type) {
+       case e1000_82580:
+               memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+               adapter->cycles.read = igb_read_clock;
+               adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+               adapter->cycles.mult = 1;
+               /*
+                * The 82580 timesync advances the system timer by 8ns every
+                * 8ns, and that increment cannot be shifted.  Instead we
+                * shift the registers to generate a 64bit timer.  As a result
+                * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
+                * 24 in order to generate a larger value for synchronization.
+                */
+               adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
+               /* disable system timer temporarily by setting bit 31 */
+               wr32(E1000_TSAUXC, 0x80000000);
+               wrfl();
+
+               /* Set registers so that rollover occurs soon to test this. */
+               wr32(E1000_SYSTIMR, 0x00000000);
+               wr32(E1000_SYSTIML, 0x80000000);
+               wr32(E1000_SYSTIMH, 0x000000FF);
+               wrfl();
+
+               /* enable system timer by clearing bit 31 */
+               wr32(E1000_TSAUXC, 0x0);
+               wrfl();
+
+               timecounter_init(&adapter->clock,
+                                &adapter->cycles,
+                                ktime_to_ns(ktime_get_real()));
+               /*
+                * Synchronize our NIC clock against the system wall clock.
+                * NIC time stamp reading requires ~3us per sample, and each
+                * sample proved stable even under load, so 10 samples suffice
+                * for each offset comparison.
+                */
+               memset(&adapter->compare, 0, sizeof(adapter->compare));
+               adapter->compare.source = &adapter->clock;
+               adapter->compare.target = ktime_get_real;
+               adapter->compare.num_samples = 10;
+               timecompare_update(&adapter->compare, 0);
+               break;
+       case e1000_82576:
+               /*
+                * Initialize hardware timer: we keep it running in case
+                * some program needs it later on.
+                */
+               memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+               adapter->cycles.read = igb_read_clock;
+               adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+               adapter->cycles.mult = 1;
+               /*
+                * Scale the NIC clock cycle by a large factor so that
+                * relatively small clock corrections can be added or
+                * subtracted at each clock tick. The drawbacks of a large
+                * factor are a) that the clock register overflows more quickly
+                * (not such a big deal) and b) that the increment per tick has
+                * to fit into 24 bits.  As a result we need to use a shift of
+                * 19 so we can fit a value of 16 into the TIMINCA register.
+                */
+               adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
+               wr32(E1000_TIMINCA,
+                               (1 << E1000_TIMINCA_16NS_SHIFT) |
+                               (16 << IGB_82576_TSYNC_SHIFT));
+
+               /* Set registers so that rollover occurs soon to test this. */
+               wr32(E1000_SYSTIML, 0x00000000);
+               wr32(E1000_SYSTIMH, 0xFF800000);
+               wrfl();
+
+               timecounter_init(&adapter->clock,
+                                &adapter->cycles,
+                                ktime_to_ns(ktime_get_real()));
+               /*
+                * Synchronize our NIC clock against the system wall clock.
+                * NIC time stamp reading requires ~3us per sample, and each
+                * sample proved stable even under load, so 10 samples suffice
+                * for each offset comparison.
+                */
+               memset(&adapter->compare, 0, sizeof(adapter->compare));
+               adapter->compare.source = &adapter->clock;
+               adapter->compare.target = ktime_get_real;
+               adapter->compare.num_samples = 10;
+               timecompare_update(&adapter->compare, 0);
+               break;
+       case e1000_82575:
+               /* 82575 does not support timesync */
+       default:
+               break;
+       }
+}
+
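
Both branches of igb_init_hw_timer() feed a struct cyclecounter with mult = 1 and a large shift; the timecounter layer then converts counter deltas with ns = (cycles * mult) >> shift. A standalone illustration of that conversion (the values are illustrative; only the shift of 19 comes from the 82576 branch above):

    #include <stdio.h>
    #include <stdint.h>

    /* the clocksource-style conversion the timecounter relies on */
    static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
    {
            return (cycles * mult) >> shift;
    }

    int main(void)
    {
            /* with mult = 1 and shift = 19, 2^19 counter units make 1 ns */
            printf("%llu ns\n", (unsigned long long)cyc2ns(1ULL << 19, 1, 19));
            printf("%llu ns\n", (unsigned long long)cyc2ns(1ULL << 29, 1, 19));
            return 0;
    }
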
+/**
  * igb_sw_init - Initialize general software structures (struct igb_adapter)
  * @adapter: board private structure to initialize
  *
@@ -1402,25 +1942,37 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
        adapter->tx_ring_count = IGB_DEFAULT_TXD;
        adapter->rx_ring_count = IGB_DEFAULT_RXD;
-       adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-       adapter->rx_ps_hdr_size = 0; /* disable packet split */
+       adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+       adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
-       /* Number of supported queues. */
-       /* Having more queues than CPUs doesn't make sense. */
-       adapter->num_rx_queues = min((u32)IGB_MAX_RX_QUEUES, (u32)num_online_cpus());
-       adapter->num_tx_queues = min(IGB_MAX_TX_QUEUES, num_online_cpus());
+#ifdef CONFIG_PCI_IOV
+       if (hw->mac.type == e1000_82576)
+               adapter->vfs_allocated_count = max_vfs;
 
-       /* This call may decrease the number of queues depending on
-        * interrupt mode. */
-       igb_set_interrupt_capability(adapter);
+#endif /* CONFIG_PCI_IOV */
+       adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+
+       /*
+        * if rss_queues > 4, or if VFs will be allocated alongside multiple
+        * rss queues, combine the queues into queue pairs in order to
+        * conserve the limited supply of interrupt vectors
+        */
+       if ((adapter->rss_queues > 4) ||
+           ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
+               adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
 
-       if (igb_alloc_queues(adapter)) {
+       /* This call may decrease the number of queues */
+       if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }
 
+       igb_init_hw_timer(adapter);
+       igb_probe_vfs(adapter);
+
        /* Explicitly disable IRQ since the NIC can be in any state. */
        igb_irq_disable(adapter);
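
The queue-pairing decision above can be restated as a small predicate, which makes the interrupt-conservation rule easy to test in isolation (a sketch; the helper name is made up):

    #include <stdbool.h>
    #include <stdio.h>

    /* mirrors the condition that sets IGB_FLAG_QUEUE_PAIRS above */
    static bool needs_queue_pairs(unsigned rss_queues, unsigned vfs)
    {
            return (rss_queues > 4) || (rss_queues > 1 && vfs > 6);
    }

    int main(void)
    {
            printf("8 rss, 0 vfs -> %d\n", needs_queue_pairs(8, 0)); /* 1 */
            printf("2 rss, 7 vfs -> %d\n", needs_queue_pairs(2, 7)); /* 1 */
            printf("4 rss, 0 vfs -> %d\n", needs_queue_pairs(4, 0)); /* 0 */
            return 0;
    }
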
 
@@ -1451,6 +2003,8 @@ static int igb_open(struct net_device *netdev)
        if (test_bit(__IGB_TESTING, &adapter->state))
                return -EBUSY;
 
+       netif_carrier_off(netdev);
+
        /* allocate transmit descriptors */
        err = igb_setup_all_tx_resources(adapter);
        if (err)
@@ -1461,12 +2015,7 @@ static int igb_open(struct net_device *netdev)
        if (err)
                goto err_setup_rx;
 
-       /* e1000_power_up_phy(adapter); */
-
-       adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
-       if ((adapter->hw.mng_cookie.status &
-            E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
-               igb_update_mng_vlan(adapter);
+       igb_power_up_link(adapter);
 
        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -1481,24 +2030,34 @@ static int igb_open(struct net_device *netdev)
        /* From here on the code is the same as igb_up() */
        clear_bit(__IGB_DOWN, &adapter->state);
 
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               napi_enable(&adapter->rx_ring[i].napi);
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+               napi_enable(&q_vector->napi);
+       }
 
        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
 
        igb_irq_enable(adapter);
 
+       /* notify VFs that reset has been completed */
+       if (adapter->vfs_allocated_count) {
+               u32 reg_data = rd32(E1000_CTRL_EXT);
+               reg_data |= E1000_CTRL_EXT_PFRSTD;
+               wr32(E1000_CTRL_EXT, reg_data);
+       }
+
        netif_tx_start_all_queues(netdev);
 
-       /* Fire a link status change interrupt to start the watchdog. */
-       wr32(E1000_ICS, E1000_ICS_LSC);
+       /* start the watchdog. */
+       hw->mac.get_link_status = 1;
+       schedule_work(&adapter->watchdog_task);
 
        return 0;
 
 err_req_irq:
        igb_release_hw_control(adapter);
-       /* e1000_power_down_phy(adapter); */
+       igb_power_down_link(adapter);
        igb_free_all_rx_resources(adapter);
 err_setup_rx:
        igb_free_all_tx_resources(adapter);
@@ -1531,29 +2090,18 @@ static int igb_close(struct net_device *netdev)
        igb_free_all_tx_resources(adapter);
        igb_free_all_rx_resources(adapter);
 
-       /* kill manageability vlan ID if supported, but not if a vlan with
-        * the same ID is registered on the host OS (let 8021q kill it) */
-       if ((adapter->hw.mng_cookie.status &
-                         E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-            !(adapter->vlgrp &&
-              vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
-               igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-
        return 0;
 }
 
 /**
  * igb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-
-int igb_setup_tx_resources(struct igb_adapter *adapter,
-                          struct igb_ring *tx_ring)
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *pdev = tx_ring->pdev;
        int size;
 
        size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -1563,24 +2111,23 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
        memset(tx_ring->buffer_info, 0, size);
 
        /* round up to nearest 4K */
-       tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc)
-                       + sizeof(u32);
+       tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-       tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+       tx_ring->desc = pci_alloc_consistent(pdev,
+                                            tx_ring->size,
                                             &tx_ring->dma);
 
        if (!tx_ring->desc)
                goto err;
 
-       tx_ring->adapter = adapter;
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        return 0;
 
 err:
        vfree(tx_ring->buffer_info);
-       dev_err(&adapter->pdev->dev,
+       dev_err(&pdev->dev,
                "Unable to allocate memory for the transmit descriptor ring\n");
        return -ENOMEM;
 }
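
The "round up to nearest 4K" step applies the kernel's power-of-two ALIGN() macro to count * sizeof(union e1000_adv_tx_desc), where the advanced descriptor is 16 bytes. A quick standalone check of the arithmetic:

    #include <stdio.h>

    /* power-of-two round-up, same semantics as the kernel's ALIGN() */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            /* 256 descriptors * 16 bytes = 4096: already 4 KB aligned */
            printf("%u\n", ALIGN(256u * 16u, 4096u)); /* 4096 */
            /* 320 descriptors * 16 bytes = 5120: rounds up to 8192 */
            printf("%u\n", ALIGN(320u * 16u, 4096u)); /* 8192 */
            return 0;
    }
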
@@ -1594,80 +2141,40 @@ err:
  **/
 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 {
+       struct pci_dev *pdev = adapter->pdev;
        int i, err = 0;
-       int r_idx;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+               err = igb_setup_tx_resources(adapter->tx_ring[i]);
                if (err) {
-                       dev_err(&adapter->pdev->dev,
+                       dev_err(&pdev->dev,
                                "Allocation for Tx Queue %u failed\n", i);
                        for (i--; i >= 0; i--)
-                               igb_free_tx_resources(&adapter->tx_ring[i]);
+                               igb_free_tx_resources(adapter->tx_ring[i]);
                        break;
                }
        }
 
-       for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
-               r_idx = i % adapter->num_tx_queues;
-               adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
-       }       
+       for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
+               int r_idx = i % adapter->num_tx_queues;
+               adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
+       }
        return err;
 }
 
 /**
- * igb_configure_tx - Configure transmit Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
+ * igb_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
  **/
-static void igb_configure_tx(struct igb_adapter *adapter)
+void igb_setup_tctl(struct igb_adapter *adapter)
 {
-       u64 tdba, tdwba;
        struct e1000_hw *hw = &adapter->hw;
        u32 tctl;
-       u32 txdctl, txctrl;
-       int i;
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct igb_ring *ring = &(adapter->tx_ring[i]);
-
-               wr32(E1000_TDLEN(i),
-                               ring->count * sizeof(struct e1000_tx_desc));
-               tdba = ring->dma;
-               wr32(E1000_TDBAL(i),
-                               tdba & 0x00000000ffffffffULL);
-               wr32(E1000_TDBAH(i), tdba >> 32);
-
-               tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
-               tdwba |= 1; /* enable head wb */
-               wr32(E1000_TDWBAL(i),
-                               tdwba & 0x00000000ffffffffULL);
-               wr32(E1000_TDWBAH(i), tdwba >> 32);
-
-               ring->head = E1000_TDH(i);
-               ring->tail = E1000_TDT(i);
-               writel(0, hw->hw_addr + ring->tail);
-               writel(0, hw->hw_addr + ring->head);
-               txdctl = rd32(E1000_TXDCTL(i));
-               txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-               wr32(E1000_TXDCTL(i), txdctl);
-
-               /* Turn off Relaxed Ordering on head write-backs.  The
-                * writebacks MUST be delivered in order or it will
-                * completely screw up our bookeeping.
-                */
-               txctrl = rd32(E1000_DCA_TXCTRL(i));
-               txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
-               wr32(E1000_DCA_TXCTRL(i), txctrl);
-       }
-
 
-
-       /* Use the default values for the Tx Inter Packet Gap (IPG) timer */
+       /* disable queue 0 which is enabled by default on 82575 and 82576 */
+       wr32(E1000_TXDCTL(0), 0);
 
        /* Program the Transmit Control Register */
-
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_CT;
        tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
@@ -1675,9 +2182,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 
        igb_config_collision_dist(hw);
 
-       /* Setup Transmit Descriptor Settings for eop descriptor */
-       adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
-
        /* Enable transmits */
        tctl |= E1000_TCTL_EN;
 
@@ -1685,27 +2189,71 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 }
 
 /**
- * igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * igb_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ **/
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 txdctl;
+       u64 tdba = ring->dma;
+       int reg_idx = ring->reg_idx;
+
+       /* disable the queue */
+       txdctl = rd32(E1000_TXDCTL(reg_idx));
+       wr32(E1000_TXDCTL(reg_idx),
+                       txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+       wrfl();
+       mdelay(10);
+
+       wr32(E1000_TDLEN(reg_idx),
+                       ring->count * sizeof(union e1000_adv_tx_desc));
+       wr32(E1000_TDBAL(reg_idx),
+                       tdba & 0x00000000ffffffffULL);
+       wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+
+       ring->head = hw->hw_addr + E1000_TDH(reg_idx);
+       ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+       writel(0, ring->head);
+       writel(0, ring->tail);
+
+       txdctl |= IGB_TX_PTHRESH;
+       txdctl |= IGB_TX_HTHRESH << 8;
+       txdctl |= IGB_TX_WTHRESH << 16;
+
+       txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+       wr32(E1000_TXDCTL(reg_idx), txdctl);
+}
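
igb_configure_tx_ring() packs the three prefetch/host/write-back thresholds into one TXDCTL word at bit offsets 0, 8 and 16. A sketch of that packing with made-up threshold values (the real IGB_TX_* defaults live in igb.h):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* illustrative values, not the driver's IGB_TX_* defaults */
            uint32_t pthresh = 8, hthresh = 1, wthresh = 1;
            uint32_t txdctl = pthresh | (hthresh << 8) | (wthresh << 16);

            printf("TXDCTL threshold word: 0x%08x\n", txdctl); /* 0x00010108 */
            return 0;
    }
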
+
+/**
+ * igb_configure_tx - Configure transmit Unit after Reset
  * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void igb_configure_tx(struct igb_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+/**
+ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-
-int igb_setup_rx_resources(struct igb_adapter *adapter,
-                          struct igb_ring *rx_ring)
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *pdev = rx_ring->pdev;
        int size, desc_len;
 
-#ifdef CONFIG_IGB_LRO
-       size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
-       rx_ring->lro_mgr.lro_arr = vmalloc(size);
-       if (!rx_ring->lro_mgr.lro_arr)
-               goto err;
-       memset(rx_ring->lro_mgr.lro_arr, 0, size);
-#endif
-
        size = sizeof(struct igb_buffer) * rx_ring->count;
        rx_ring->buffer_info = vmalloc(size);
        if (!rx_ring->buffer_info)
@@ -1727,17 +2275,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
-       rx_ring->adapter = adapter;
-
        return 0;
 
 err:
-#ifdef CONFIG_IGB_LRO
-       vfree(rx_ring->lro_mgr.lro_arr);
-       rx_ring->lro_mgr.lro_arr = NULL;
-#endif
        vfree(rx_ring->buffer_info);
-       dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
+       rx_ring->buffer_info = NULL;
+       dev_err(&pdev->dev, "Unable to allocate memory for "
                "the receive descriptor ring\n");
        return -ENOMEM;
 }
@@ -1751,15 +2294,16 @@ err:
  **/
 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 {
+       struct pci_dev *pdev = adapter->pdev;
        int i, err = 0;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+               err = igb_setup_rx_resources(adapter->rx_ring[i]);
                if (err) {
-                       dev_err(&adapter->pdev->dev,
+                       dev_err(&pdev->dev,
                                "Allocation for Rx Queue %u failed\n", i);
                        for (i--; i >= 0; i--)
-                               igb_free_rx_resources(&adapter->rx_ring[i]);
+                               igb_free_rx_resources(adapter->rx_ring[i]);
                        break;
                }
        }
@@ -1768,238 +2312,350 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 }
 
 /**
- * igb_setup_rctl - configure the receive control registers
+ * igb_setup_mrqc - configure the multiple receive queue control registers
  * @adapter: Board private structure
  **/
-static void igb_setup_rctl(struct igb_adapter *adapter)
+static void igb_setup_mrqc(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u32 rctl;
-       u32 srrctl = 0;
-       int i;
-
-       rctl = rd32(E1000_RCTL);
-
-       rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+       u32 mrqc, rxcsum;
+       u32 j, num_rx_queues, shift = 0, shift2 = 0;
+       union e1000_reta {
+               u32 dword;
+               u8  bytes[4];
+       } reta;
+       static const u8 rsshash[40] = {
+               0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
+               0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
+               0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
+               0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+
+       /* Fill out hash function seeds */
+       for (j = 0; j < 10; j++) {
+               u32 rsskey = rsshash[(j * 4)];
+               rsskey |= rsshash[(j * 4) + 1] << 8;
+               rsskey |= rsshash[(j * 4) + 2] << 16;
+               rsskey |= rsshash[(j * 4) + 3] << 24;
+               array_wr32(E1000_RSSRK(0), j, rsskey);
+       }
 
-       rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
-               E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
-               (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+       num_rx_queues = adapter->rss_queues;
 
-       /*
-        * enable stripping of CRC. It's unlikely this will break BMC
-        * redirection as it did with e1000. Newer features require
-        * that the HW strips the CRC.
-       */
-       rctl |= E1000_RCTL_SECRC;
-
-       rctl &= ~E1000_RCTL_SBP;
-
-       if (adapter->netdev->mtu <= ETH_DATA_LEN)
-               rctl &= ~E1000_RCTL_LPE;
-       else
-               rctl |= E1000_RCTL_LPE;
-       if (adapter->rx_buffer_len <= IGB_RXBUFFER_2048) {
-               /* Setup buffer sizes */
-               rctl &= ~E1000_RCTL_SZ_4096;
-               rctl |= E1000_RCTL_BSEX;
-               switch (adapter->rx_buffer_len) {
-               case IGB_RXBUFFER_256:
-                       rctl |= E1000_RCTL_SZ_256;
-                       rctl &= ~E1000_RCTL_BSEX;
-                       break;
-               case IGB_RXBUFFER_512:
-                       rctl |= E1000_RCTL_SZ_512;
-                       rctl &= ~E1000_RCTL_BSEX;
+       if (adapter->vfs_allocated_count) {
+               /* 82575 and 82576 support 2 RSS queues for VMDq */
+               switch (hw->mac.type) {
+               case e1000_82580:
+                       num_rx_queues = 1;
+                       shift = 0;
                        break;
-               case IGB_RXBUFFER_1024:
-                       rctl |= E1000_RCTL_SZ_1024;
-                       rctl &= ~E1000_RCTL_BSEX;
+               case e1000_82576:
+                       shift = 3;
+                       num_rx_queues = 2;
                        break;
-               case IGB_RXBUFFER_2048:
+               case e1000_82575:
+                       shift = 2;
+                       shift2 = 6;
                default:
-                       rctl |= E1000_RCTL_SZ_2048;
-                       rctl &= ~E1000_RCTL_BSEX;
                        break;
                }
        } else {
-               rctl &= ~E1000_RCTL_BSEX;
-               srrctl = adapter->rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+               if (hw->mac.type == e1000_82575)
+                       shift = 6;
        }
 
-       /* 82575 and greater support packet-split where the protocol
-        * header is placed in skb->data and the packet data is
-        * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
-        * In the case of a non-split, skb->data is linearly filled,
-        * followed by the page buffers.  Therefore, skb->data is
-        * sized to hold the largest protocol header.
+       for (j = 0; j < (32 * 4); j++) {
+               reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+               if (shift2)
+                       reta.bytes[j & 3] |= num_rx_queues << shift2;
+               if ((j & 3) == 3)
+                       wr32(E1000_RETA(j >> 2), reta.dword);
+       }
+
+       /*
+        * Disable raw packet checksumming so that RSS hash is placed in
+        * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
+        * offloads as they are enabled by default
         */
-       /* allocations using alloc_page take too long for regular MTU
-        * so only enable packet split for jumbo frames */
-       if (rctl & E1000_RCTL_LPE) {
-               adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
-               srrctl |= adapter->rx_ps_hdr_size <<
-                        E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-               srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+       rxcsum = rd32(E1000_RXCSUM);
+       rxcsum |= E1000_RXCSUM_PCSD;
+
+       if (adapter->hw.mac.type >= e1000_82576)
+               /* Enable Receive Checksum Offload for SCTP */
+               rxcsum |= E1000_RXCSUM_CRCOFL;
+
+       /* Don't need to set TUOFL or IPOFL, they default to 1 */
+       wr32(E1000_RXCSUM, rxcsum);
+
+       /* If VMDq is enabled then we set the appropriate mode for that, else
+        * we default to RSS so that an RSS hash is calculated per packet even
+        * if we are only using one queue */
+       if (adapter->vfs_allocated_count) {
+               if (hw->mac.type > e1000_82575) {
+                       /* Set the default pool for the PF's first queue */
+                       u32 vtctl = rd32(E1000_VT_CTL);
+                       vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+                                  E1000_VT_CTL_DISABLE_DEF_POOL);
+                       vtctl |= adapter->vfs_allocated_count <<
+                               E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+                       wr32(E1000_VT_CTL, vtctl);
+               }
+               if (adapter->rss_queues > 1)
+                       mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+               else
+                       mrqc = E1000_MRQC_ENABLE_VMDQ;
        } else {
-               adapter->rx_ps_hdr_size = 0;
-               srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+               mrqc = E1000_MRQC_ENABLE_RSS_4Q;
        }
+       igb_vmm_control(adapter);
+
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+                E1000_MRQC_RSS_FIELD_IPV4_TCP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
+                E1000_MRQC_RSS_FIELD_IPV6_TCP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
+                E1000_MRQC_RSS_FIELD_IPV6_UDP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
+                E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+       wr32(E1000_MRQC, mrqc);
+}
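
The redirection table fill above packs 128 one-byte RETA entries four at a time into 32 register writes, striping them across the active queues. A userspace model of the same loop (printing instead of writing E1000_RETA):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* 4 RSS queues, no VMDq shifting (shift = shift2 = 0) */
            unsigned num_rx_queues = 4, shift = 0, shift2 = 0, j;
            union { uint32_t dword; uint8_t bytes[4]; } reta;

            for (j = 0; j < 32 * 4; j++) {
                    reta.bytes[j & 3] = (j % num_rx_queues) << shift;
                    if (shift2)
                            reta.bytes[j & 3] |= num_rx_queues << shift2;
                    if ((j & 3) == 3)
                            printf("RETA[%u] = 0x%08x\n", j >> 2, reta.dword);
            }
            return 0;
    }
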
 
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               wr32(E1000_SRRCTL(i), srrctl);
+/**
+ * igb_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+void igb_setup_rctl(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 rctl;
+
+       rctl = rd32(E1000_RCTL);
+
+       rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+       rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+
+       rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
+               (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+       /*
+        * enable stripping of CRC. It's unlikely this will break BMC
+        * redirection as it did with e1000. Newer features require
+        * that the HW strips the CRC.
+        */
+       rctl |= E1000_RCTL_SECRC;
+
+       /* disable store bad packets and clear size bits. */
+       rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
+
+       /* enable LPE to allow frames larger than the default; the actual
+        * maximum size is enforced via RLPML */
+       rctl |= E1000_RCTL_LPE;
+
+       /* disable queue 0 to prevent tail write w/o re-config */
+       wr32(E1000_RXDCTL(0), 0);
+
+       /* Attention!!!  For SR-IOV PF driver operations you must enable
+        * queue drop for all VF and PF queues to prevent head of line blocking
+        * if an un-trusted VF does not provide descriptors to hardware.
+        */
+       if (adapter->vfs_allocated_count) {
+               /* set all queue drop enable bits */
+               wr32(E1000_QDE, ALL_QUEUES);
+       }
 
        wr32(E1000_RCTL, rctl);
 }
 
+static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+                                   int vfn)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 vmolr;
+
+       /* if the size request is for a VF rather than the PF, and that VF
+        * has vlans enabled, grow the size to leave room for a vlan tag */
+       if (vfn < adapter->vfs_allocated_count &&
+           adapter->vf_data[vfn].vlans_enabled)
+               size += VLAN_TAG_SIZE;
+
+       vmolr = rd32(E1000_VMOLR(vfn));
+       vmolr &= ~E1000_VMOLR_RLPML_MASK;
+       vmolr |= size | E1000_VMOLR_LPE;
+       wr32(E1000_VMOLR(vfn), vmolr);
+
+       return 0;
+}
+
 /**
- * igb_configure_rx - Configure receive Unit after Reset
+ * igb_rlpml_set - set maximum receive packet size
  * @adapter: board private structure
  *
- * Configure the Rx unit of the MAC after a reset.
+ * Configure maximum receivable packet size.
  **/
-static void igb_configure_rx(struct igb_adapter *adapter)
+static void igb_rlpml_set(struct igb_adapter *adapter)
 {
-       u64 rdba;
+       u32 max_frame_size = adapter->max_frame_size;
        struct e1000_hw *hw = &adapter->hw;
-       u32 rctl, rxcsum;
-       u32 rxdctl;
-       int i;
-
-       /* disable receives while setting up the descriptors */
-       rctl = rd32(E1000_RCTL);
-       wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-       wrfl();
-       mdelay(10);
+       u16 pf_id = adapter->vfs_allocated_count;
 
-       if (adapter->itr_setting > 3)
-               wr32(E1000_ITR, adapter->itr);
+       if (adapter->vlgrp)
+               max_frame_size += VLAN_TAG_SIZE;
 
-       /* Setup the HW Rx Head and Tail Descriptor Pointers and
-        * the Base and Length of the Rx Descriptor Ring */
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *ring = &(adapter->rx_ring[i]);
-               rdba = ring->dma;
-               wr32(E1000_RDBAL(i),
-                               rdba & 0x00000000ffffffffULL);
-               wr32(E1000_RDBAH(i), rdba >> 32);
-               wr32(E1000_RDLEN(i),
-                              ring->count * sizeof(union e1000_adv_rx_desc));
-
-               ring->head = E1000_RDH(i);
-               ring->tail = E1000_RDT(i);
-               writel(0, hw->hw_addr + ring->tail);
-               writel(0, hw->hw_addr + ring->head);
-
-               rxdctl = rd32(E1000_RXDCTL(i));
-               rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
-               rxdctl &= 0xFFF00000;
-               rxdctl |= IGB_RX_PTHRESH;
-               rxdctl |= IGB_RX_HTHRESH << 8;
-               rxdctl |= IGB_RX_WTHRESH << 16;
-               wr32(E1000_RXDCTL(i), rxdctl);
-#ifdef CONFIG_IGB_LRO
-               /* Intitial LRO Settings */
-               ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
-               ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
-               ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
-               ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
-               ring->lro_mgr.dev = adapter->netdev;
-               ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
-               ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-#endif
+       /* if vfs are enabled we set RLPML to the largest possible request
+        * size and set the VMOLR RLPML to the size we need */
+       if (pf_id) {
+               igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
+               max_frame_size = MAX_JUMBO_FRAME_SIZE;
        }
 
-       if (adapter->num_rx_queues > 1) {
-               u32 random[10];
-               u32 mrqc;
-               u32 j, shift;
-               union e1000_reta {
-                       u32 dword;
-                       u8  bytes[4];
-               } reta;
+       wr32(E1000_RLPML, max_frame_size);
+}
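
igb_rlpml_set() programs the max-frame arithmetic established in igb_sw_init(): MTU plus Ethernet header and FCS, plus four bytes when a vlan group is registered. The numbers for a standard MTU:

    #include <stdio.h>

    #define ETH_HLEN      14 /* Ethernet header */
    #define ETH_FCS_LEN    4 /* frame check sequence */
    #define VLAN_TAG_SIZE  4 /* 802.1Q tag */

    int main(void)
    {
            unsigned mtu = 1500;
            unsigned max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;

            printf("no vlan: %u\n", max_frame);                 /* 1518 */
            printf("vlan:    %u\n", max_frame + VLAN_TAG_SIZE); /* 1522 */
            return 0;
    }
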
 
-               get_random_bytes(&random[0], 40);
+static inline void igb_set_vmolr(struct igb_adapter *adapter,
+                                int vfn, bool aupe)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 vmolr;
 
-               if (hw->mac.type >= e1000_82576)
-                       shift = 0;
-               else
-                       shift = 6;
-               for (j = 0; j < (32 * 4); j++) {
-                       reta.bytes[j & 3] =
-                               (j % adapter->num_rx_queues) << shift;
-                       if ((j & 3) == 3)
-                               writel(reta.dword,
-                                      hw->hw_addr + E1000_RETA(0) + (j & ~3));
-               }
-               mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+       /*
+        * This register exists only on 82576 and newer, so on older
+        * hardware there is nothing to do
+        */
+       if (hw->mac.type < e1000_82576)
+               return;
 
-               /* Fill out hash function seeds */
-               for (j = 0; j < 10; j++)
-                       array_wr32(E1000_RSSRK(0), j, random[j]);
+       vmolr = rd32(E1000_VMOLR(vfn));
+       vmolr |= E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
+       if (aupe)
+               vmolr |= E1000_VMOLR_AUPE;        /* Accept untagged packets */
+       else
+               vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
 
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-                        E1000_MRQC_RSS_FIELD_IPV4_TCP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
-                        E1000_MRQC_RSS_FIELD_IPV6_TCP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
-                        E1000_MRQC_RSS_FIELD_IPV6_UDP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
-                        E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+       /* start with BAM and RSSE clear; set them below only when needed */
+       vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
 
+       if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
+               vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
+       /*
+        * for VMDq only allow the VFs and pool 0 to accept broadcast and
+        * multicast packets
+        */
+       if (vfn <= adapter->vfs_allocated_count)
+               vmolr |= E1000_VMOLR_BAM;          /* Accept broadcast */
 
-               wr32(E1000_MRQC, mrqc);
+       wr32(E1000_VMOLR(vfn), vmolr);
+}
 
-               /* Multiqueue and raw packet checksumming are mutually
-                * exclusive.  Note that this not the same as TCP/IP
-                * checksumming, which works fine. */
-               rxcsum = rd32(E1000_RXCSUM);
-               rxcsum |= E1000_RXCSUM_PCSD;
-               wr32(E1000_RXCSUM, rxcsum);
+/**
+ * igb_configure_rx_ring - Configure a receive ring after Reset
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u64 rdba = ring->dma;
+       int reg_idx = ring->reg_idx;
+       u32 srrctl, rxdctl;
+
+       /* disable the queue */
+       rxdctl = rd32(E1000_RXDCTL(reg_idx));
+       wr32(E1000_RXDCTL(reg_idx),
+                       rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+
+       /* Set DMA base address registers */
+       wr32(E1000_RDBAL(reg_idx),
+            rdba & 0x00000000ffffffffULL);
+       wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+       wr32(E1000_RDLEN(reg_idx),
+                      ring->count * sizeof(union e1000_adv_rx_desc));
+
+       /* initialize head and tail */
+       ring->head = hw->hw_addr + E1000_RDH(reg_idx);
+       ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+       writel(0, ring->head);
+       writel(0, ring->tail);
+
+       /* set descriptor configuration */
+       if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
+               srrctl = ALIGN(ring->rx_buffer_len, 64) <<
+                        E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+               srrctl |= IGB_RXBUFFER_16384 >>
+                         E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else
+               srrctl |= (PAGE_SIZE / 2) >>
+                         E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif
+               srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
        } else {
-               /* Enable Receive Checksum Offload for TCP and UDP */
-               rxcsum = rd32(E1000_RXCSUM);
-               if (adapter->rx_csum) {
-                       rxcsum |= E1000_RXCSUM_TUOFL;
-
-                       /* Enable IPv4 payload checksum for UDP fragments
-                        * Must be used in conjunction with packet-split. */
-                       if (adapter->rx_ps_hdr_size)
-                               rxcsum |= E1000_RXCSUM_IPPCSE;
-               } else {
-                       rxcsum &= ~E1000_RXCSUM_TUOFL;
-                       /* don't need to clear IPPCSE as it defaults to 0 */
-               }
-               wr32(E1000_RXCSUM, rxcsum);
+               srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
+                        E1000_SRRCTL_BSIZEPKT_SHIFT;
+               srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
        }
+       /* Only set Drop Enable if we are supporting multiple queues,
+        * where one full ring could otherwise block the others
+        */
+       if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
+               srrctl |= E1000_SRRCTL_DROP_EN;
+
+       wr32(E1000_SRRCTL(reg_idx), srrctl);
+
+       /* set filtering for VMDQ pools */
+       igb_set_vmolr(adapter, reg_idx & 0x7, true);
+
+       /* enable receive descriptor fetching */
+       rxdctl = rd32(E1000_RXDCTL(reg_idx));
+       rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+       rxdctl &= 0xFFF00000;
+       rxdctl |= IGB_RX_PTHRESH;
+       rxdctl |= IGB_RX_HTHRESH << 8;
+       rxdctl |= IGB_RX_WTHRESH << 16;
+       wr32(E1000_RXDCTL(reg_idx), rxdctl);
+}
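
The one-buffer branch encodes the receive buffer length into SRRCTL in 1 KB units (E1000_SRRCTL_BSIZEPKT_SHIFT is 10 in the igb headers), after rounding up with ALIGN(). A standalone check of that encoding, and of the drop-enable rule this patch introduces:

    #include <stdio.h>
    #include <stdbool.h>

    #define ALIGN(x, a)    (((x) + (a) - 1) & ~((a) - 1))
    #define BSIZEPKT_SHIFT 10 /* SRRCTL packet size is in 1 KB units */

    /* DROP_EN is only set when more than one queue can back up */
    static bool want_drop_en(unsigned vfs, unsigned num_rx_queues)
    {
            return vfs || num_rx_queues > 1;
    }

    int main(void)
    {
            unsigned rx_buffer_len = 2048;

            printf("BSIZEPKT field: %u\n",
                   ALIGN(rx_buffer_len, 1024) >> BSIZEPKT_SHIFT); /* 2 */
            printf("drop_en(0 vfs, 1 queue):  %d\n", want_drop_en(0, 1)); /* 0 */
            printf("drop_en(0 vfs, 4 queues): %d\n", want_drop_en(0, 4)); /* 1 */
            return 0;
    }
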
 
-       if (adapter->vlgrp)
-               wr32(E1000_RLPML,
-                               adapter->max_frame_size + VLAN_TAG_SIZE);
-       else
-               wr32(E1000_RLPML, adapter->max_frame_size);
+/**
+ * igb_configure_rx - Configure receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void igb_configure_rx(struct igb_adapter *adapter)
+{
+       int i;
 
-       /* Enable Receives */
-       wr32(E1000_RCTL, rctl);
+       /* set UTA to appropriate mode */
+       igb_set_uta(adapter);
+
+       /* set the correct pool for the PF default MAC address in entry 0 */
+       igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
+                        adapter->vfs_allocated_count);
+
+       /* Setup the HW Rx Head and Tail Descriptor Pointers and
+        * the Base and Length of the Rx Descriptor Ring */
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
  * igb_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
  **/
 void igb_free_tx_resources(struct igb_ring *tx_ring)
 {
-       struct pci_dev *pdev = tx_ring->adapter->pdev;
-
        igb_clean_tx_ring(tx_ring);
 
        vfree(tx_ring->buffer_info);
        tx_ring->buffer_info = NULL;
 
-       pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+       /* if not set, then don't free */
+       if (!tx_ring->desc)
+               return;
+
+       pci_free_consistent(tx_ring->pdev, tx_ring->size,
+                           tx_ring->desc, tx_ring->dma);
 
        tx_ring->desc = NULL;
 }
@@ -2015,17 +2671,23 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               igb_free_tx_resources(&adapter->tx_ring[i]);
+               igb_free_tx_resources(adapter->tx_ring[i]);
 }
 
-static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
-                                          struct igb_buffer *buffer_info)
+void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
+                                   struct igb_buffer *buffer_info)
 {
        if (buffer_info->dma) {
-               pci_unmap_page(adapter->pdev,
-                               buffer_info->dma,
-                               buffer_info->length,
-                               PCI_DMA_TODEVICE);
+               if (buffer_info->mapped_as_page)
+                       pci_unmap_page(tx_ring->pdev,
+                                       buffer_info->dma,
+                                       buffer_info->length,
+                                       PCI_DMA_TODEVICE);
+               else
+                       pci_unmap_single(tx_ring->pdev,
+                                       buffer_info->dma,
+                                       buffer_info->length,
+                                       PCI_DMA_TODEVICE);
                buffer_info->dma = 0;
        }
        if (buffer_info->skb) {
@@ -2033,17 +2695,17 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
-       /* buffer_info must be completely set up in the transmit path */
+       buffer_info->length = 0;
+       buffer_info->next_to_watch = 0;
+       buffer_info->mapped_as_page = false;
 }
 
 /**
  * igb_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
  * @tx_ring: ring to be cleaned
  **/
 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
-       struct igb_adapter *adapter = tx_ring->adapter;
        struct igb_buffer *buffer_info;
        unsigned long size;
        unsigned int i;
@@ -2054,21 +2716,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 
        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
-               igb_unmap_and_free_tx_resource(adapter, buffer_info);
+               igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
        }
 
        size = sizeof(struct igb_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);
 
        /* Zero out the descriptor ring */
-
        memset(tx_ring->desc, 0, tx_ring->size);
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-
-       writel(0, adapter->hw.hw_addr + tx_ring->head);
-       writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -2080,31 +2738,28 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               igb_clean_tx_ring(&adapter->tx_ring[i]);
+               igb_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 /**
  * igb_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
  * @rx_ring: ring to clean the resources from
  *
  * Free all receive software resources
  **/
 void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
-       struct pci_dev *pdev = rx_ring->adapter->pdev;
-
        igb_clean_rx_ring(rx_ring);
 
        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
 
-#ifdef CONFIG_IGB_LRO
-       vfree(rx_ring->lro_mgr.lro_arr);
-       rx_ring->lro_mgr.lro_arr = NULL;
-#endif 
+       /* if not set, then don't free */
+       if (!rx_ring->desc)
+               return;
 
-       pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+       pci_free_consistent(rx_ring->pdev, rx_ring->size,
+                           rx_ring->desc, rx_ring->dma);
 
        rx_ring->desc = NULL;
 }
@@ -2120,36 +2775,30 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_free_rx_resources(&adapter->rx_ring[i]);
+               igb_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
  * igb_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-       struct igb_adapter *adapter = rx_ring->adapter;
        struct igb_buffer *buffer_info;
-       struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;
 
        if (!rx_ring->buffer_info)
                return;
+
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
-                       if (adapter->rx_ps_hdr_size)
-                               pci_unmap_single(pdev, buffer_info->dma,
-                                                adapter->rx_ps_hdr_size,
-                                                PCI_DMA_FROMDEVICE);
-                       else
-                               pci_unmap_single(pdev, buffer_info->dma,
-                                                adapter->rx_buffer_len,
-                                                PCI_DMA_FROMDEVICE);
+                       pci_unmap_single(rx_ring->pdev,
+                                        buffer_info->dma,
+                                        rx_ring->rx_buffer_len,
+                                        PCI_DMA_FROMDEVICE);
                        buffer_info->dma = 0;
                }
 
@@ -2157,14 +2806,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
+               if (buffer_info->page_dma) {
+                       pci_unmap_page(rx_ring->pdev,
+                                      buffer_info->page_dma,
+                                      PAGE_SIZE / 2,
+                                      PCI_DMA_FROMDEVICE);
+                       buffer_info->page_dma = 0;
+               }
                if (buffer_info->page) {
-                       if (buffer_info->page_dma)
-                               pci_unmap_page(pdev, buffer_info->page_dma,
-                                              PAGE_SIZE / 2,
-                                              PCI_DMA_FROMDEVICE);
                        put_page(buffer_info->page);
                        buffer_info->page = NULL;
-                       buffer_info->page_dma = 0;
                        buffer_info->page_offset = 0;
                }
        }
@@ -2177,9 +2828,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-
-       writel(0, adapter->hw.hw_addr + rx_ring->head);
-       writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
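
Note the cleanup now unmaps the header buffer at rx_buffer_len and the page buffer at PAGE_SIZE / 2, reflecting a packet-split layout where each receive page is shared as two half-page buffers. A sketch of the half-page offset flip that layout implies (assuming an offset field that alternates halves; illustrative only):

static unsigned long next_half_page_offset(unsigned long page_offset,
					   unsigned long page_size)
{
	/* toggles between 0 and page_size / 2 */
	return page_offset ^ (page_size / 2);
}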
 
 /**
@@ -2191,7 +2839,7 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_clean_rx_ring(&adapter->rx_ring[i]);
+               igb_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -2204,78 +2852,183 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
 static int igb_set_mac(struct net_device *netdev, void *p)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-       memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+       memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-       adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+       /* set the correct pool for the new PF MAC address in entry 0 */
+       igb_rar_set_qsel(adapter, hw->mac.addr, 0,
+                        adapter->vfs_allocated_count);
 
        return 0;
 }
 
 /**
- * igb_set_multi - Multicast and Promiscuous mode set
+ * igb_write_mc_addr_list - write multicast addresses to MTA
  * @netdev: network interface device structure
  *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *          0 on no addresses written
+ *          X on writing X addresses to MTA
  **/
-static void igb_set_multi(struct net_device *netdev)
+static int igb_write_mc_addr_list(struct net_device *netdev)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       struct e1000_mac_info *mac = &hw->mac;
-       struct dev_mc_list *mc_ptr;
+       struct dev_mc_list *mc_ptr = netdev->mc_list;
        u8  *mta_list;
-       u32 rctl;
+       u32 vmolr = 0;
        int i;
 
-       /* Check for Promiscuous and All Multicast modes */
-
-       rctl = rd32(E1000_RCTL);
-
-       if (netdev->flags & IFF_PROMISC) {
-               rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-               rctl &= ~E1000_RCTL_VFE;
-       } else {
-               if (netdev->flags & IFF_ALLMULTI) {
-                       rctl |= E1000_RCTL_MPE;
-                       rctl &= ~E1000_RCTL_UPE;
-               } else
-                       rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
-               rctl |= E1000_RCTL_VFE;
-       }
-       wr32(E1000_RCTL, rctl);
-
-       if (!netdev->mc_count) {
+       if (netdev_mc_empty(netdev)) {
                /* nothing to program, so clear mc list */
-               igb_update_mc_addr_list_82575(hw, NULL, 0, 1,
-                                         mac->rar_entry_count);
-               return;
+               igb_update_mc_addr_list(hw, NULL, 0);
+               igb_restore_vf_multicasts(adapter);
+               return 0;
        }
 
-       mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+       mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
        if (!mta_list)
-               return;
+               return -ENOMEM;
+
+       /* set vmolr receive overflow multicast bit */
+       vmolr |= E1000_VMOLR_ROMPE;
 
        /* The shared function expects a packed array of only addresses. */
        mc_ptr = netdev->mc_list;
 
-       for (i = 0; i < netdev->mc_count; i++) {
+       for (i = 0; i < netdev_mc_count(netdev); i++) {
                if (!mc_ptr)
                        break;
                memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
                mc_ptr = mc_ptr->next;
        }
-       igb_update_mc_addr_list_82575(hw, mta_list, i, 1,
-                                     mac->rar_entry_count);
+       igb_update_mc_addr_list(hw, mta_list, i);
        kfree(mta_list);
+
+       return netdev_mc_count(netdev);
+}
+
+/**
+ * igb_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *          0 on no addresses written
+ *          X on writing X addresses to the RAR table
+ **/
+static int igb_write_uc_addr_list(struct net_device *netdev)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       unsigned int vfn = adapter->vfs_allocated_count;
+       unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
+       int count = 0;
+
+       /* return ENOMEM indicating insufficient memory for addresses */
+       if (netdev_uc_count(netdev) > rar_entries)
+               return -ENOMEM;
+
+       if (!netdev_uc_empty(netdev) && rar_entries) {
+               struct netdev_hw_addr *ha;
+
+               netdev_for_each_uc_addr(ha, netdev) {
+                       if (!rar_entries)
+                               break;
+                       igb_rar_set_qsel(adapter, ha->addr,
+                                        rar_entries--,
+                                        vfn);
+                       count++;
+               }
+       }
+       /* write the addresses in reverse order to avoid write combining */
+       for (; rar_entries > 0 ; rar_entries--) {
+               wr32(E1000_RAH(rar_entries), 0);
+               wr32(E1000_RAL(rar_entries), 0);
+       }
+       wrfl();
+
+       return count;
+}
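
The rar_entries budget above reserves entry 0 for the PF MAC address plus one entry per virtual function, leaving the remainder for unicast filters. Reduced to its arithmetic (hypothetical helper, not in the driver):

static unsigned int pf_usable_rar_entries(unsigned int rar_entry_count,
					  unsigned int num_vfs)
{
	/* entry 0 (PF MAC) and one RAR entry per VF are reserved */
	return rar_entry_count - (num_vfs + 1);
}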
+
+/**
+ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void igb_set_rx_mode(struct net_device *netdev)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       unsigned int vfn = adapter->vfs_allocated_count;
+       u32 rctl, vmolr = 0;
+       int count;
+
+       /* Check for Promiscuous and All Multicast modes */
+       rctl = rd32(E1000_RCTL);
+
+       /* clear the affected bits */
+       rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
+
+       if (netdev->flags & IFF_PROMISC) {
+               rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+               vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+       } else {
+               if (netdev->flags & IFF_ALLMULTI) {
+                       rctl |= E1000_RCTL_MPE;
+                       vmolr |= E1000_VMOLR_MPME;
+               } else {
+                       /*
+                        * Write addresses to the MTA; if the attempt fails,
+                        * we should just turn on promiscuous mode so that
+                        * we can at least receive multicast traffic
+                        */
+                       count = igb_write_mc_addr_list(netdev);
+                       if (count < 0) {
+                               rctl |= E1000_RCTL_MPE;
+                               vmolr |= E1000_VMOLR_MPME;
+                       } else if (count) {
+                               vmolr |= E1000_VMOLR_ROMPE;
+                       }
+               }
+               /*
+                * Write addresses to available RAR registers; if there is not
+                * sufficient space to store all the addresses, then enable
+                * unicast promiscuous mode
+                */
+               count = igb_write_uc_addr_list(netdev);
+               if (count < 0) {
+                       rctl |= E1000_RCTL_UPE;
+                       vmolr |= E1000_VMOLR_ROPE;
+               }
+               rctl |= E1000_RCTL_VFE;
+       }
+       wr32(E1000_RCTL, rctl);
+
+       /*
+        * In order to support SR-IOV and eventually VMDq it is necessary to set
+        * the VMOLR to enable the appropriate modes.  Without this workaround
+        * we will have issues with VLAN tag stripping not being done for frames
+        * that arrive only because we are the default pool
+        */
+       if (hw->mac.type < e1000_82576)
+               return;
+
+       vmolr |= rd32(E1000_VMOLR(vfn)) &
+                ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+       wr32(E1000_VMOLR(vfn), vmolr);
+       igb_restore_vf_multicasts(adapter);
 }
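
igb_set_rx_mode() now degrades gracefully: a failed MTA write falls back to multicast promiscuous, and a unicast list that overflows the RAR table falls back to unicast promiscuous. The multicast side of that policy as a pure function (a sketch, not driver code):

static int needs_multicast_promisc(int iff_promisc, int iff_allmulti,
				   int mc_write_result)
{
	/* a negative result means the MTA could not be programmed */
	return iff_promisc || iff_allmulti || (mc_write_result < 0);
}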
 
 /* Need to wait a few seconds after link up to get diagnostic information from
@@ -2283,8 +3036,43 @@ static void igb_set_multi(struct net_device *netdev)
 static void igb_update_phy_info(unsigned long data)
 {
        struct igb_adapter *adapter = (struct igb_adapter *) data;
-       if (adapter->hw.phy.ops.get_phy_info)
-               adapter->hw.phy.ops.get_phy_info(&adapter->hw);
+       igb_get_phy_info(&adapter->hw);
+}
+
+/**
+ * igb_has_link - check shared code for link and determine up/down
+ * @adapter: pointer to driver private info
+ **/
+bool igb_has_link(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       bool link_active = false;
+       s32 ret_val = 0;
+
+       /* get_link_status is set on LSC (link status) interrupt or
+        * rx sequence error interrupt.  get_link_status will stay
+        * false until the e1000_check_for_link establishes link
+        * for copper adapters ONLY
+        */
+       switch (hw->phy.media_type) {
+       case e1000_media_type_copper:
+               if (hw->mac.get_link_status) {
+                       ret_val = hw->mac.ops.check_for_link(hw);
+                       link_active = !hw->mac.get_link_status;
+               } else {
+                       link_active = true;
+               }
+               break;
+       case e1000_media_type_internal_serdes:
+               ret_val = hw->mac.ops.check_for_link(hw);
+               link_active = hw->mac.serdes_has_link;
+               break;
+       default:
+       case e1000_media_type_unknown:
+               break;
+       }
+
+       return link_active;
 }
 
 /**
@@ -2301,54 +3089,33 @@ static void igb_watchdog(unsigned long data)
 static void igb_watchdog_task(struct work_struct *work)
 {
        struct igb_adapter *adapter = container_of(work,
-                                       struct igb_adapter, watchdog_task);
+                                                  struct igb_adapter,
+                                                  watchdog_task);
        struct e1000_hw *hw = &adapter->hw;
-
        struct net_device *netdev = adapter->netdev;
-       struct igb_ring *tx_ring = adapter->tx_ring;
-       struct e1000_mac_info *mac = &adapter->hw.mac;
        u32 link;
-       u32 eics = 0;
-       s32 ret_val;
        int i;
 
-       if ((netif_carrier_ok(netdev)) &&
-           (rd32(E1000_STATUS) & E1000_STATUS_LU))
-               goto link_up;
-
-       ret_val = hw->mac.ops.check_for_link(&adapter->hw);
-       if ((ret_val == E1000_ERR_PHY) &&
-           (hw->phy.type == e1000_phy_igp_3) &&
-           (rd32(E1000_CTRL) &
-            E1000_PHY_CTRL_GBE_DISABLE))
-               dev_info(&adapter->pdev->dev,
-                        "Gigabit has been disabled, downgrading speed\n");
-
-       if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
-           !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
-               link = mac->serdes_has_link;
-       else
-               link = rd32(E1000_STATUS) &
-                                     E1000_STATUS_LU;
-
+       link = igb_has_link(adapter);
        if (link) {
                if (!netif_carrier_ok(netdev)) {
                        u32 ctrl;
-                       hw->mac.ops.get_speed_and_duplex(&adapter->hw,
-                                                  &adapter->link_speed,
-                                                  &adapter->link_duplex);
+                       hw->mac.ops.get_speed_and_duplex(hw,
+                                                        &adapter->link_speed,
+                                                        &adapter->link_duplex);
 
                        ctrl = rd32(E1000_CTRL);
-                       dev_info(&adapter->pdev->dev,
-                                "NIC Link is Up %d Mbps %s, "
+                       /* Link status message must follow this format */
+                       printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
                                 "Flow Control: %s\n",
-                                adapter->link_speed,
-                                adapter->link_duplex == FULL_DUPLEX ?
+                              netdev->name,
+                              adapter->link_speed,
+                              adapter->link_duplex == FULL_DUPLEX ?
                                 "Full Duplex" : "Half Duplex",
-                                ((ctrl & E1000_CTRL_TFCE) && (ctrl &
-                                E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
-                                E1000_CTRL_RFCE) ? "RX" : ((ctrl &
-                                E1000_CTRL_TFCE) ? "TX" : "None")));
+                              ((ctrl & E1000_CTRL_TFCE) &&
+                               (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
+                              ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
+                              ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
 
                        /* tweak tx_queue_len according to speed/duplex and
                         * adjust the timeout factor */
@@ -2366,8 +3133,10 @@ static void igb_watchdog_task(struct work_struct *work)
                        }
 
                        netif_carrier_on(netdev);
-                       netif_tx_wake_all_queues(netdev);
 
+                       igb_ping_all_vfs(adapter);
+
+                       /* link state has changed, schedule phy info update */
                        if (!test_bit(__IGB_DOWN, &adapter->state))
                                mod_timer(&adapter->phy_info_timer,
                                          round_jiffies(jiffies + 2 * HZ));
@@ -2376,53 +3145,53 @@ static void igb_watchdog_task(struct work_struct *work)
                if (netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
-                       dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+                       /* Link status message must follow this format */
+                       printk(KERN_INFO "igb: %s NIC Link is Down\n",
+                              netdev->name);
                        netif_carrier_off(netdev);
-                       netif_tx_stop_all_queues(netdev);
+
+                       igb_ping_all_vfs(adapter);
+
+                       /* link state has changed, schedule phy info update */
                        if (!test_bit(__IGB_DOWN, &adapter->state))
                                mod_timer(&adapter->phy_info_timer,
                                          round_jiffies(jiffies + 2 * HZ));
                }
        }
 
-link_up:
        igb_update_stats(adapter);
 
-       mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
-       adapter->tpt_old = adapter->stats.tpt;
-       mac->collision_delta = adapter->stats.colc - adapter->colc_old;
-       adapter->colc_old = adapter->stats.colc;
-
-       adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
-       adapter->gorc_old = adapter->stats.gorc;
-       adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
-       adapter->gotc_old = adapter->stats.gotc;
-
-       igb_update_adaptive(&adapter->hw);
-
-       if (!netif_carrier_ok(netdev)) {
-               if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               struct igb_ring *tx_ring = adapter->tx_ring[i];
+               if (!netif_carrier_ok(netdev)) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context). */
-                       adapter->tx_timeout_count++;
-                       schedule_work(&adapter->reset_task);
+                       if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+                               adapter->tx_timeout_count++;
+                               schedule_work(&adapter->reset_task);
+                               /* return immediately since reset is imminent */
+                               return;
+                       }
                }
+
+               /* Force detection of hung controller every watchdog period */
+               tx_ring->detect_tx_hung = true;
        }
 
        /* Cause software interrupt to ensure rx ring is cleaned */
        if (adapter->msix_entries) {
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       eics |= adapter->rx_ring[i].eims_value;
+               u32 eics = 0;
+               for (i = 0; i < adapter->num_q_vectors; i++) {
+                       struct igb_q_vector *q_vector = adapter->q_vector[i];
+                       eics |= q_vector->eims_value;
+               }
                wr32(E1000_EICS, eics);
        } else {
                wr32(E1000_ICS, E1000_ICS_RXDMT0);
        }
 
-       /* Force detection of hung controller every watchdog period */
-       tx_ring->detect_tx_hung = true;
-
        /* Reset the timer */
        if (!test_bit(__IGB_DOWN, &adapter->state))
                mod_timer(&adapter->watchdog_timer,
@@ -2436,7 +3205,6 @@ enum latency_range {
        latency_invalid = 255
 };
 
-
 /**
  * igb_update_ring_itr - update the dynamic ITR value based on packet size
  *
@@ -2451,28 +3219,40 @@ enum latency_range {
  *      parameter (see igb_param.c)
  *      NOTE:  This function is called only when operating in a multiqueue
  *             receive environment.
- * @rx_ring: pointer to ring
+ * @q_vector: pointer to q_vector
  **/
-static void igb_update_ring_itr(struct igb_ring *rx_ring)
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 {
-       int new_val = rx_ring->itr_val;
+       int new_val = q_vector->itr_val;
        int avg_wire_size = 0;
-       struct igb_adapter *adapter = rx_ring->adapter;
-
-       if (!rx_ring->total_packets)
-               goto clear_counts; /* no packets, so don't do anything */
+       struct igb_adapter *adapter = q_vector->adapter;
 
        /* For non-gigabit speeds, just fix the interrupt rate at 4000
         * ints/sec - ITR timer value of 976 ticks.
         */
        if (adapter->link_speed != SPEED_1000) {
-               new_val = 120;
+               new_val = 976;
                goto set_itr_val;
        }
-       avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
 
-       /* Add 24 bytes to size to account for CRC, preamble, and gap */
-       avg_wire_size += 24;
+       if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
+               struct igb_ring *ring = q_vector->rx_ring;
+               avg_wire_size = ring->total_bytes / ring->total_packets;
+       }
+
+       if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
+               struct igb_ring *ring = q_vector->tx_ring;
+               avg_wire_size = max_t(u32, avg_wire_size,
+                                     (ring->total_bytes /
+                                      ring->total_packets));
+       }
+
+       /* if avg_wire_size isn't set, no work was done */
+       if (!avg_wire_size)
+               goto clear_counts;
+
+       /* Add 24 bytes to size to account for CRC, preamble, and gap */
+       avg_wire_size += 24;
 
        /* Don't starve jumbo frames */
        avg_wire_size = min(avg_wire_size, 3000);
@@ -2483,14 +3263,24 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
        else
                new_val = avg_wire_size / 2;
 
+       /* when in itr mode 3 do not exceed 20K ints/sec */
+       if (adapter->rx_itr_setting == 3 && new_val < 196)
+               new_val = 196;
+
 set_itr_val:
-       if (new_val != rx_ring->itr_val) {
-               rx_ring->itr_val = new_val;
-               rx_ring->set_itr = 1;
+       if (new_val != q_vector->itr_val) {
+               q_vector->itr_val = new_val;
+               q_vector->set_itr = 1;
        }
 clear_counts:
-       rx_ring->total_bytes = 0;
-       rx_ring->total_packets = 0;
+       if (q_vector->rx_ring) {
+               q_vector->rx_ring->total_bytes = 0;
+               q_vector->rx_ring->total_packets = 0;
+       }
+       if (q_vector->tx_ring) {
+               q_vector->tx_ring->total_bytes = 0;
+               q_vector->tx_ring->total_packets = 0;
+       }
 }
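
For reference, the avg_wire_size to itr_val mapping condensed into one function (a sketch reconstructed from this hunk; the exact mid-size band sits partly outside the visible context, so treat its bounds as an assumption):

static int itr_from_avg_wire_size(int avg_wire_size)
{
	avg_wire_size += 24;			/* CRC, preamble, gap */
	if (avg_wire_size > 3000)
		avg_wire_size = 3000;		/* don't starve jumbo frames */
	if (avg_wire_size > 300 && avg_wire_size < 1200)
		return avg_wire_size / 3;	/* e.g. 600 bytes -> 208 */
	return avg_wire_size / 2;		/* e.g. 1500 bytes -> 762 */
}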
 
 /**
@@ -2507,7 +3297,7 @@ clear_counts:
  *      NOTE:  These calculations are only valid when operating in a single-
  *             queue environment.
  * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
+ * @itr_setting: current q_vector->itr_val
  * @packets: the number of packets during this measurement interval
  * @bytes: the number of bytes during this measurement interval
  **/
@@ -2547,7 +3337,7 @@ static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
                if (bytes > 25000) {
                        if (packets > 35)
                                retval = low_latency;
-               } else if (bytes < 6000) {
+               } else if (bytes < 1500) {
                        retval = low_latency;
                }
                break;
@@ -2559,8 +3349,9 @@ update_itr_done:
 
 static void igb_set_itr(struct igb_adapter *adapter)
 {
+       struct igb_q_vector *q_vector = adapter->q_vector[0];
        u16 current_itr;
-       u32 new_itr = adapter->itr;
+       u32 new_itr = q_vector->itr_val;
 
        /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
        if (adapter->link_speed != SPEED_1000) {
@@ -2571,54 +3362,48 @@ static void igb_set_itr(struct igb_adapter *adapter)
 
        adapter->rx_itr = igb_update_itr(adapter,
                                    adapter->rx_itr,
-                                   adapter->rx_ring->total_packets,
-                                   adapter->rx_ring->total_bytes);
+                                   q_vector->rx_ring->total_packets,
+                                   q_vector->rx_ring->total_bytes);
 
-       if (adapter->rx_ring->buddy) {
-               adapter->tx_itr = igb_update_itr(adapter,
-                                           adapter->tx_itr,
-                                           adapter->tx_ring->total_packets,
-                                           adapter->tx_ring->total_bytes);
-
-               current_itr = max(adapter->rx_itr, adapter->tx_itr);
-       } else {
-               current_itr = adapter->rx_itr;
-       }
+       adapter->tx_itr = igb_update_itr(adapter,
+                                   adapter->tx_itr,
+                                   q_vector->tx_ring->total_packets,
+                                   q_vector->tx_ring->total_bytes);
+       current_itr = max(adapter->rx_itr, adapter->tx_itr);
 
        /* conservative mode (itr 3) eliminates the lowest_latency setting */
-       if (adapter->itr_setting == 3 &&
-           current_itr == lowest_latency)
+       if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
                current_itr = low_latency;
 
        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
-               new_itr = 70000;
+               new_itr = 56;  /* aka 70,000 ints/sec */
                break;
        case low_latency:
-               new_itr = 20000; /* aka hwitr = ~200 */
+               new_itr = 196; /* aka 20,000 ints/sec */
                break;
        case bulk_latency:
-               new_itr = 4000;
+               new_itr = 980; /* aka 4,000 ints/sec */
                break;
        default:
                break;
        }
 
 set_itr_now:
-       adapter->rx_ring->total_bytes = 0;
-       adapter->rx_ring->total_packets = 0;
-       if (adapter->rx_ring->buddy) {
-               adapter->rx_ring->buddy->total_bytes = 0;
-               adapter->rx_ring->buddy->total_packets = 0;
-       }
+       q_vector->rx_ring->total_bytes = 0;
+       q_vector->rx_ring->total_packets = 0;
+       q_vector->tx_ring->total_bytes = 0;
+       q_vector->tx_ring->total_packets = 0;
 
-       if (new_itr != adapter->itr) {
+       if (new_itr != q_vector->itr_val) {
                /* this attempts to bias the interrupt rate towards Bulk
                 * by adding intermediate steps when interrupt rate is
                 * increasing */
-               new_itr = new_itr > adapter->itr ?
-                            min(adapter->itr + (new_itr >> 2), new_itr) :
+               new_itr = new_itr > q_vector->itr_val ?
+                            max((new_itr * q_vector->itr_val) /
+                                (new_itr + (q_vector->itr_val >> 2)),
+                                new_itr) :
                             new_itr;
                /* Don't write the value here; it resets the adapter's
                 * internal timer, and causes us to delay far longer than
@@ -2626,24 +3411,22 @@ set_itr_now:
                 * value at the beginning of the next interrupt so the timing
                 * ends up being correct.
                 */
-               adapter->itr = new_itr;
-               adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
-               adapter->rx_ring->set_itr = 1;
+               q_vector->itr_val = new_itr;
+               q_vector->set_itr = 1;
        }
 
        return;
 }
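
The new constants read naturally once itr_val is taken as an interval in roughly 0.25 usec units, which the "aka" comments above imply: 56 -> ~71,000 ints/sec, 196 -> ~20,400, 980 -> ~4,100. As a helper (the unit is an assumption inferred from those comments; itr_val must be nonzero):

static unsigned int itr_val_to_ints_per_sec(unsigned int itr_val)
{
	/* 0.25 usec units: 4,000,000 intervals per second */
	return 4000000u / itr_val;
}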
 
-
 #define IGB_TX_FLAGS_CSUM              0x00000001
 #define IGB_TX_FLAGS_VLAN              0x00000002
 #define IGB_TX_FLAGS_TSO               0x00000004
 #define IGB_TX_FLAGS_IPV4              0x00000008
-#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IGB_TX_FLAGS_VLAN_SHIFT        16
+#define IGB_TX_FLAGS_TSTAMP            0x00000010
+#define IGB_TX_FLAGS_VLAN_MASK         0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT        16
 
-static inline int igb_tso_adv(struct igb_adapter *adapter,
-                             struct igb_ring *tx_ring,
+static inline int igb_tso_adv(struct igb_ring *tx_ring,
                              struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
        struct e1000_adv_tx_context_desc *context_desc;
@@ -2671,7 +3454,7 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP,
                                                         0);
-       } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+       } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                       &ipv6_hdr(skb)->daddr,
@@ -2704,14 +3487,15 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
        mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
        mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
-       /* Context index must be unique per ring. */
-       if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
-               mss_l4len_idx |= tx_ring->queue_index << 4;
+       /* For 82575, context index must be unique per ring. */
+       if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+               mss_l4len_idx |= tx_ring->reg_idx << 4;
 
        context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
        context_desc->seqnum_seed = 0;
 
        buffer_info->time_stamp = jiffies;
+       buffer_info->next_to_watch = i;
        buffer_info->dma = 0;
        i++;
        if (i == tx_ring->count)
@@ -2722,14 +3506,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
        return true;
 }
 
-static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
-                                       struct igb_ring *tx_ring,
-                                       struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
+                                  struct sk_buff *skb, u32 tx_flags)
 {
        struct e1000_adv_tx_context_desc *context_desc;
-       unsigned int i;
+       struct pci_dev *pdev = tx_ring->pdev;
        struct igb_buffer *buffer_info;
        u32 info = 0, tu_cmd = 0;
+       unsigned int i;
 
        if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
            (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -2739,6 +3523,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
                if (tx_flags & IGB_TX_FLAGS_VLAN)
                        info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
+
                info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        info |= skb_network_header_len(skb);
@@ -2748,20 +3533,35 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
                tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       switch (skb->protocol) {
-                       case __constant_htons(ETH_P_IP):
+                       __be16 protocol;
+
+                       if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+                               const struct vlan_ethhdr *vhdr =
+                                         (const struct vlan_ethhdr *)skb->data;
+
+                               protocol = vhdr->h_vlan_encapsulated_proto;
+                       } else {
+                               protocol = skb->protocol;
+                       }
+
+                       switch (protocol) {
+                       case cpu_to_be16(ETH_P_IP):
                                tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                                        tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+                               else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
+                                       tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
                                break;
-                       case __constant_htons(ETH_P_IPV6):
+                       case cpu_to_be16(ETH_P_IPV6):
                                /* XXX what about other V6 headers?? */
                                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                                        tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+                               else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
+                                       tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
                                break;
                        default:
                                if (unlikely(net_ratelimit()))
-                                       dev_warn(&adapter->pdev->dev,
+                                       dev_warn(&pdev->dev,
                                            "partial checksum but proto=%x!\n",
                                            skb->protocol);
                                break;
@@ -2770,11 +3570,12 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
                context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
                context_desc->seqnum_seed = 0;
-               if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+               if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
                        context_desc->mss_l4len_idx =
-                               cpu_to_le32(tx_ring->queue_index << 4);
+                               cpu_to_le32(tx_ring->reg_idx << 4);
 
                buffer_info->time_stamp = jiffies;
+               buffer_info->next_to_watch = i;
                buffer_info->dma = 0;
 
                i++;
@@ -2784,19 +3585,17 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
                return true;
        }
-
-
        return false;
 }
 
 #define IGB_MAX_TXD_PWR        16
 #define IGB_MAX_DATA_PER_TXD   (1<<IGB_MAX_TXD_PWR)
 
-static inline int igb_tx_map_adv(struct igb_adapter *adapter,
-                                struct igb_ring *tx_ring,
-                                struct sk_buff *skb)
+static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
+                                unsigned int first)
 {
        struct igb_buffer *buffer_info;
+       struct pci_dev *pdev = tx_ring->pdev;
        unsigned int len = skb_headlen(skb);
        unsigned int count = 0, i;
        unsigned int f;
@@ -2808,16 +3607,20 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
        buffer_info->length = len;
        /* set time_stamp *before* dma to help avoid a possible race */
        buffer_info->time_stamp = jiffies;
-       buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
+       buffer_info->next_to_watch = i;
+       buffer_info->dma = pci_map_single(pdev, skb->data, len,
                                          PCI_DMA_TODEVICE);
-       count++;
-       i++;
-       if (i == tx_ring->count)
-               i = 0;
+       if (pci_dma_mapping_error(pdev, buffer_info->dma))
+               goto dma_error;
 
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                struct skb_frag_struct *frag;
 
+               count++;
+               i++;
+               if (i == tx_ring->count)
+                       i = 0;
+
                frag = &skb_shinfo(skb)->frags[f];
                len = frag->size;
 
@@ -2825,33 +3628,55 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
                BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
                buffer_info->length = len;
                buffer_info->time_stamp = jiffies;
-               buffer_info->dma = pci_map_page(adapter->pdev,
+               buffer_info->next_to_watch = i;
+               buffer_info->mapped_as_page = true;
+               buffer_info->dma = pci_map_page(pdev,
                                                frag->page,
                                                frag->page_offset,
                                                len,
                                                PCI_DMA_TODEVICE);
+               if (pci_dma_mapping_error(pdev, buffer_info->dma))
+                       goto dma_error;
 
-               count++;
-               i++;
-               if (i == tx_ring->count)
-                       i = 0;
        }
 
-       i = (i == 0) ? tx_ring->count - 1 : i - 1;
        tx_ring->buffer_info[i].skb = skb;
+       tx_ring->buffer_info[first].next_to_watch = i;
 
-       return count;
+       return ++count;
+
+dma_error:
+       dev_err(&pdev->dev, "TX DMA map failed\n");
+
+       /* clear timestamp and dma mappings for failed buffer_info mapping */
+       buffer_info->dma = 0;
+       buffer_info->time_stamp = 0;
+       buffer_info->length = 0;
+       buffer_info->next_to_watch = 0;
+       buffer_info->mapped_as_page = false;
+
+       /* clear timestamp and dma mappings for remaining portion of packet */
+       while (count--) {
+               if (i == 0)
+                       i += tx_ring->count;
+               i--;
+               buffer_info = &tx_ring->buffer_info[i];
+               igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+       }
+
+       return 0;
 }
 
-static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
-                                   struct igb_ring *tx_ring,
+static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
                                    int tx_flags, int count, u32 paylen,
                                    u8 hdr_len)
 {
-       union e1000_adv_tx_desc *tx_desc = NULL;
+       union e1000_adv_tx_desc *tx_desc;
        struct igb_buffer *buffer_info;
        u32 olinfo_status = 0, cmd_type_len;
-       unsigned int i;
+       unsigned int i = tx_ring->next_to_use;
 
        cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
                        E1000_ADVTXD_DCMD_DEXT);
@@ -2859,6 +3684,9 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
        if (tx_flags & IGB_TX_FLAGS_VLAN)
                cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
 
+       if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+               cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
+
        if (tx_flags & IGB_TX_FLAGS_TSO) {
                cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
 
@@ -2873,27 +3701,28 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
                olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
        }
 
-       if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
-           (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+       if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
+           (tx_flags & (IGB_TX_FLAGS_CSUM |
+                        IGB_TX_FLAGS_TSO |
                         IGB_TX_FLAGS_VLAN)))
-               olinfo_status |= tx_ring->queue_index << 4;
+               olinfo_status |= tx_ring->reg_idx << 4;
 
        olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
 
-       i = tx_ring->next_to_use;
-       while (count--) {
+       do {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
                tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->read.cmd_type_len =
                        cpu_to_le32(cmd_type_len | buffer_info->length);
                tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+               count--;
                i++;
                if (i == tx_ring->count)
                        i = 0;
-       }
+       } while (count > 0);
 
-       tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+       tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -2901,16 +3730,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
        wmb();
 
        tx_ring->next_to_use = i;
-       writel(i, adapter->hw.hw_addr + tx_ring->tail);
+       writel(i, tx_ring->tail);
        /* we need this if more than one processor can write to our tail
         * at a time, it synchronizes IO on IA64/Altix systems */
        mmiowb();
 }
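
The wmb()-before-writel() pairing is a publish pattern: every descriptor write must be visible before the tail doorbell invites hardware to fetch them. A user-space analogue using a C11 release store (an analogy only; it does not replace the MMIO barrier semantics above):

#include <stdatomic.h>

static void publish_tail(_Atomic unsigned int *tail, unsigned int next_to_use)
{
	/* release ordering: prior stores become visible before the tail update */
	atomic_store_explicit(tail, next_to_use, memory_order_release);
}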
 
-static int __igb_maybe_stop_tx(struct net_device *netdev,
-                              struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct net_device *netdev = tx_ring->netdev;
 
        netif_stop_subqueue(netdev, tx_ring->queue_index);
 
@@ -2921,59 +3749,48 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 
        /* We need to check again in case another CPU has just
         * made room available. */
-       if (IGB_DESC_UNUSED(tx_ring) < size)
+       if (igb_desc_unused(tx_ring) < size)
                return -EBUSY;
 
        /* A reprieve! */
        netif_wake_subqueue(netdev, tx_ring->queue_index);
-       ++adapter->restart_queue;
+       tx_ring->tx_stats.restart_queue++;
        return 0;
 }
 
-static int igb_maybe_stop_tx(struct net_device *netdev,
-                            struct igb_ring *tx_ring, int size)
+static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
-       if (IGB_DESC_UNUSED(tx_ring) >= size)
+       if (igb_desc_unused(tx_ring) >= size)
                return 0;
-       return __igb_maybe_stop_tx(netdev, tx_ring, size);
+       return __igb_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
-
-static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
-                                  struct net_device *netdev,
-                                  struct igb_ring *tx_ring)
+netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
+                                   struct igb_ring *tx_ring)
 {
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+       unsigned int first;
        unsigned int tx_flags = 0;
-       unsigned int len;
        u8 hdr_len = 0;
-       int tso = 0;
-
-       len = skb_headlen(skb);
-
-       if (test_bit(__IGB_DOWN, &adapter->state)) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
-       }
-
-       if (skb->len <= 0) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
-       }
+       int tso = 0, count;
+       union skb_shared_tx *shtx = skb_tx(skb);
 
        /* need: 1 descriptor per page,
         *       + 2 desc gap to keep tail from touching head,
         *       + 1 desc for skb->data,
         *       + 1 desc for context descriptor,
         * otherwise try next time */
-       if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+       if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
                /* this is a hard error */
                return NETDEV_TX_BUSY;
        }
-       skb_orphan(skb);
 
-       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+       if (unlikely(shtx->hardware)) {
+               shtx->in_progress = 1;
+               tx_flags |= IGB_TX_FLAGS_TSTAMP;
+       }
+
+       if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
                tx_flags |= IGB_TX_FLAGS_VLAN;
                tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
        }
@@ -2981,46 +3798,67 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
        if (skb->protocol == htons(ETH_P_IP))
                tx_flags |= IGB_TX_FLAGS_IPV4;
 
-       tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
-                                             &hdr_len) : 0;
+       first = tx_ring->next_to_use;
+       if (skb_is_gso(skb)) {
+               tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
 
-       if (tso < 0) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
+               if (tso < 0) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
        }
 
        if (tso)
                tx_flags |= IGB_TX_FLAGS_TSO;
-       else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags))
-                       if (skb->ip_summed == CHECKSUM_PARTIAL)
-                               tx_flags |= IGB_TX_FLAGS_CSUM;
+       else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
+                (skb->ip_summed == CHECKSUM_PARTIAL))
+               tx_flags |= IGB_TX_FLAGS_CSUM;
 
-       igb_tx_queue_adv(adapter, tx_ring, tx_flags,
-                        igb_tx_map_adv(adapter, tx_ring, skb),
-                        skb->len, hdr_len);
+       /*
+        * count reflects descriptors mapped; if 0, a mapping error
+        * has occurred and we need to rewind the descriptor queue
+        */
+       count = igb_tx_map_adv(tx_ring, skb, first);
+       if (!count) {
+               dev_kfree_skb_any(skb);
+               tx_ring->buffer_info[first].time_stamp = 0;
+               tx_ring->next_to_use = first;
+               return NETDEV_TX_OK;
+       }
 
-       netdev->trans_start = jiffies;
+       igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
 
        /* Make sure there is space in the ring for the next send. */
-       igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+       igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
 
        return NETDEV_TX_OK;
 }
 
-static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
+                                     struct net_device *netdev)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct igb_ring *tx_ring;
-
        int r_idx = 0;
-       r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
+
+       if (test_bit(__IGB_DOWN, &adapter->state)) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
+       if (skb->len <= 0) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
+       r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
        tx_ring = adapter->multi_tx_table[r_idx];
 
        /* This goes back to the question of how to logically map a tx queue
         * to a flow.  Right now, performance is impacted slightly negatively
         * if using multiple tx queues.  If the stack breaks away from a
         * single qdisc implementation, we can look at this again. */
-       return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
+       return igb_xmit_frame_ring_adv(skb, tx_ring);
 }
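
The r_idx computation relies on IGB_ABS_MAX_TX_QUEUES being a power of two, so the AND is a cheap modulo that can never index past the table. Illustrative form:

static unsigned int select_tx_queue(unsigned int queue_mapping,
				    unsigned int abs_max_queues)
{
	/* valid only when abs_max_queues is a power of two */
	return queue_mapping & (abs_max_queues - 1);
}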
 
 /**
@@ -3034,9 +3872,13 @@ static void igb_tx_timeout(struct net_device *netdev)
 
        /* Do the reset outside of interrupt context */
        adapter->tx_timeout_count++;
+
+       if (hw->mac.type == e1000_82580)
+               hw->dev_spec._82575.global_device_reset = true;
+
        schedule_work(&adapter->reset_task);
-       wr32(E1000_EICS, adapter->eims_enable_mask &
-               ~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
+       wr32(E1000_EICS,
+            (adapter->eims_enable_mask & ~adapter->eims_other));
 }
 
 static void igb_reset_task(struct work_struct *work)
@@ -3054,13 +3896,10 @@ static void igb_reset_task(struct work_struct *work)
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the timer callback.
  **/
-static struct net_device_stats *
-igb_get_stats(struct net_device *netdev)
+static struct net_device_stats *igb_get_stats(struct net_device *netdev)
 {
-       struct igb_adapter *adapter = netdev_priv(netdev);
-
        /* only return the current stats */
-       return &adapter->net_stats;
+       return &netdev->stats;
 }
 
 /**
@@ -3073,26 +3912,25 @@ igb_get_stats(struct net_device *netdev)
 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
+       struct pci_dev *pdev = adapter->pdev;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+       u32 rx_buffer_len, i;
 
-       if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
-           (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-               dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+       if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+               dev_err(&pdev->dev, "Invalid MTU setting\n");
                return -EINVAL;
        }
 
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
        if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-               dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
+               dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
                return -EINVAL;
        }
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
+
        /* igb_down has a dependency on max_frame_size */
        adapter->max_frame_size = max_frame;
-       if (netif_running(netdev))
-               igb_down(adapter);
 
        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more; this pushes us to allocate from the next
@@ -3100,29 +3938,23 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
         * i.e. RXBUFFER_2048 --> size-4096 slab
         */
 
-       if (max_frame <= IGB_RXBUFFER_256)
-               adapter->rx_buffer_len = IGB_RXBUFFER_256;
-       else if (max_frame <= IGB_RXBUFFER_512)
-               adapter->rx_buffer_len = IGB_RXBUFFER_512;
-       else if (max_frame <= IGB_RXBUFFER_1024)
-               adapter->rx_buffer_len = IGB_RXBUFFER_1024;
-       else if (max_frame <= IGB_RXBUFFER_2048)
-               adapter->rx_buffer_len = IGB_RXBUFFER_2048;
+       if (max_frame <= IGB_RXBUFFER_1024)
+               rx_buffer_len = IGB_RXBUFFER_1024;
+       else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
+               rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
        else
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-               adapter->rx_buffer_len = IGB_RXBUFFER_16384;
-#else
-               adapter->rx_buffer_len = PAGE_SIZE / 2;
-#endif
-       /* adjust allocation if LPE protects us, and we aren't using SBP */
-       if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-            (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
-               adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+               rx_buffer_len = IGB_RXBUFFER_128;
 
-       dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
+       if (netif_running(netdev))
+               igb_down(adapter);
+
+       dev_info(&pdev->dev, "changing MTU from %d to %d\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
 
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
+
        if (netif_running(netdev))
                igb_up(adapter);
        else
@@ -3140,9 +3972,13 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 
 void igb_update_stats(struct igb_adapter *adapter)
 {
+       struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
+       u32 rnbc;
        u16 phy_tmp;
+       int i;
+       u64 bytes, packets;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
@@ -3155,6 +3991,31 @@ void igb_update_stats(struct igb_adapter *adapter)
        if (pci_channel_offline(pdev))
                return;
 
+       bytes = 0;
+       packets = 0;
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
+               struct igb_ring *ring = adapter->rx_ring[i];
+               ring->rx_stats.drops += rqdpc_tmp;
+               net_stats->rx_fifo_errors += rqdpc_tmp;
+               bytes += ring->rx_stats.bytes;
+               packets += ring->rx_stats.packets;
+       }
+
+       net_stats->rx_bytes = bytes;
+       net_stats->rx_packets = packets;
+
+       bytes = 0;
+       packets = 0;
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               struct igb_ring *ring = adapter->tx_ring[i];
+               bytes += ring->tx_stats.bytes;
+               packets += ring->tx_stats.packets;
+       }
+       net_stats->tx_bytes = bytes;
+       net_stats->tx_packets = packets;
+
+       /* read stats registers */
        adapter->stats.crcerrs += rd32(E1000_CRCERRS);
        adapter->stats.gprc += rd32(E1000_GPRC);
        adapter->stats.gorc += rd32(E1000_GORCL);
@@ -3187,7 +4048,9 @@ void igb_update_stats(struct igb_adapter *adapter)
        adapter->stats.gptc += rd32(E1000_GPTC);
        adapter->stats.gotc += rd32(E1000_GOTCL);
        rd32(E1000_GOTCH); /* clear GOTCL */
-       adapter->stats.rnbc += rd32(E1000_RNBC);
+       rnbc = rd32(E1000_RNBC);
+       adapter->stats.rnbc += rnbc;
+       net_stats->rx_fifo_errors += rnbc;
        adapter->stats.ruc += rd32(E1000_RUC);
        adapter->stats.rfc += rd32(E1000_RFC);
        adapter->stats.rjc += rd32(E1000_RJC);
@@ -3205,12 +4068,8 @@ void igb_update_stats(struct igb_adapter *adapter)
        adapter->stats.mptc += rd32(E1000_MPTC);
        adapter->stats.bptc += rd32(E1000_BPTC);
 
-       /* used for adaptive IFS */
-
-       hw->mac.tx_packet_delta = rd32(E1000_TPT);
-       adapter->stats.tpt += hw->mac.tx_packet_delta;
-       hw->mac.collision_delta = rd32(E1000_COLC);
-       adapter->stats.colc += hw->mac.collision_delta;
+       adapter->stats.tpt += rd32(E1000_TPT);
+       adapter->stats.colc += rd32(E1000_COLC);
 
        adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
        adapter->stats.rxerrc += rd32(E1000_RXERRC);
@@ -3229,37 +4088,36 @@ void igb_update_stats(struct igb_adapter *adapter)
        adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
 
        /* Fill out the OS statistics structure */
-       adapter->net_stats.multicast = adapter->stats.mprc;
-       adapter->net_stats.collisions = adapter->stats.colc;
+       net_stats->multicast = adapter->stats.mprc;
+       net_stats->collisions = adapter->stats.colc;
 
        /* Rx Errors */
 
        /* RLEC on some newer hardware can be incorrect so build
-       * our own version based on RUC and ROC */
-       adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+        * our own version based on RUC and ROC */
+       net_stats->rx_errors = adapter->stats.rxerrc +
                adapter->stats.crcerrs + adapter->stats.algnerrc +
                adapter->stats.ruc + adapter->stats.roc +
                adapter->stats.cexterr;
-       adapter->net_stats.rx_length_errors = adapter->stats.ruc +
-                                             adapter->stats.roc;
-       adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-       adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-       adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+       net_stats->rx_length_errors = adapter->stats.ruc +
+                                     adapter->stats.roc;
+       net_stats->rx_crc_errors = adapter->stats.crcerrs;
+       net_stats->rx_frame_errors = adapter->stats.algnerrc;
+       net_stats->rx_missed_errors = adapter->stats.mpc;
 
        /* Tx Errors */
-       adapter->net_stats.tx_errors = adapter->stats.ecol +
-                                      adapter->stats.latecol;
-       adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
-       adapter->net_stats.tx_window_errors = adapter->stats.latecol;
-       adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+       net_stats->tx_errors = adapter->stats.ecol +
+                              adapter->stats.latecol;
+       net_stats->tx_aborted_errors = adapter->stats.ecol;
+       net_stats->tx_window_errors = adapter->stats.latecol;
+       net_stats->tx_carrier_errors = adapter->stats.tncrs;
 
        /* Tx Dropped needs to be maintained elsewhere */
 
        /* Phy Stats */
        if (hw->phy.media_type == e1000_media_type_copper) {
                if ((adapter->link_speed == SPEED_1000) &&
-                  (!hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
-                                             &phy_tmp))) {
+                  (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
                        phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
                        adapter->phy_stats.idle_errors += phy_tmp;
                }
@@ -3271,161 +4129,134 @@ void igb_update_stats(struct igb_adapter *adapter)
        adapter->stats.mgpdc += rd32(E1000_MGTPDC);
 }
 
-
 static irqreturn_t igb_msix_other(int irq, void *data)
 {
-       struct net_device *netdev = data;
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct igb_adapter *adapter = data;
        struct e1000_hw *hw = &adapter->hw;
        u32 icr = rd32(E1000_ICR);
-
        /* reading ICR causes bit 31 of EICR to be cleared */
-       if (!(icr & E1000_ICR_LSC))
-               goto no_link_interrupt;
-       hw->mac.get_link_status = 1;
-       /* guard against interrupt when we're going down */
-       if (!test_bit(__IGB_DOWN, &adapter->state))
-               mod_timer(&adapter->watchdog_timer, jiffies + 1);
-       
-no_link_interrupt:
-       wr32(E1000_IMS, E1000_IMS_LSC);
-       wr32(E1000_EIMS, adapter->eims_other);
 
-       return IRQ_HANDLED;
-}
+       if (icr & E1000_ICR_DRSTA)
+               schedule_work(&adapter->reset_task);
 
-static irqreturn_t igb_msix_tx(int irq, void *data)
-{
-       struct igb_ring *tx_ring = data;
-       struct igb_adapter *adapter = tx_ring->adapter;
-       struct e1000_hw *hw = &adapter->hw;
+       if (icr & E1000_ICR_DOUTSYNC) {
+               /* HW is reporting DMA is out of sync */
+               adapter->stats.doosync++;
+       }
 
-#ifdef CONFIG_IGB_DCA
-       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-               igb_update_tx_dca(tx_ring);
-#endif
-       tx_ring->total_bytes = 0;
-       tx_ring->total_packets = 0;
-
-       /* auto mask will automatically reenable the interrupt when we write
-        * EICS */
-       if (!igb_clean_tx_irq(tx_ring))
-               /* Ring was not completely cleaned, so fire another interrupt */
-               wr32(E1000_EICS, tx_ring->eims_value);
+       /* Check for a mailbox event */
+       if (icr & E1000_ICR_VMMB)
+               igb_msg_task(adapter);
+
+       if (icr & E1000_ICR_LSC) {
+               hw->mac.get_link_status = 1;
+               /* guard against interrupt when we're going down */
+               if (!test_bit(__IGB_DOWN, &adapter->state))
+                       mod_timer(&adapter->watchdog_timer, jiffies + 1);
+       }
+
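+       /* re-enable only the causes serviced here; the mailbox cause is
+        * unmasked only when VFs are allocated */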
+       if (adapter->vfs_allocated_count)
+               wr32(E1000_IMS, E1000_IMS_LSC |
+                               E1000_IMS_VMMB |
+                               E1000_IMS_DOUTSYNC);
        else
-               wr32(E1000_EIMS, tx_ring->eims_value);
+               wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
+       wr32(E1000_EIMS, adapter->eims_other);
 
        return IRQ_HANDLED;
 }
 
-static void igb_write_itr(struct igb_ring *ring)
+static void igb_write_itr(struct igb_q_vector *q_vector)
 {
-       struct e1000_hw *hw = &ring->adapter->hw;
-       if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
-               switch (hw->mac.type) {
-               case e1000_82576:
-                       wr32(ring->itr_register,
-                            ring->itr_val |
-                            0x80000000);
-                       break;
-               default:
-                       wr32(ring->itr_register,
-                            ring->itr_val |
-                            (ring->itr_val << 16));
-                       break;
-               }
-               ring->set_itr = 0;
-       }
+       struct igb_adapter *adapter = q_vector->adapter;
+       u32 itr_val = q_vector->itr_val & 0x7FFC;
+
+       if (!q_vector->set_itr)
+               return;
+
+       if (!itr_val)
+               itr_val = 0x4;
+
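+       /* EITR layout differs by mac type: 82575 wants the interval mirrored
+        * into the high word, newer parts take the interval plus a mode bit */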
+       if (adapter->hw.mac.type == e1000_82575)
+               itr_val |= itr_val << 16;
+       else
+               itr_val |= 0x8000000;
+
+       writel(itr_val, q_vector->itr_register);
+       q_vector->set_itr = 0;
 }
 
-static irqreturn_t igb_msix_rx(int irq, void *data)
+static irqreturn_t igb_msix_ring(int irq, void *data)
 {
-       struct igb_ring *rx_ring = data;
-       struct igb_adapter *adapter = rx_ring->adapter;
-
-       /* Write the ITR value calculated at the end of the
-        * previous interrupt.
-        */
+       struct igb_q_vector *q_vector = data;
 
-       igb_write_itr(rx_ring);
+       /* Write the ITR value calculated from the previous interrupt. */
+       igb_write_itr(q_vector);
 
-       if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
-               __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
+       napi_schedule(&q_vector->napi);
 
-#ifdef CONFIG_IGB_DCA
-       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-               igb_update_rx_dca(rx_ring);
-#endif
-               return IRQ_HANDLED;
+       return IRQ_HANDLED;
 }
 
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *rx_ring)
+static void igb_update_dca(struct igb_q_vector *q_vector)
 {
-       u32 dca_rxctrl;
-       struct igb_adapter *adapter = rx_ring->adapter;
+       struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        int cpu = get_cpu();
-       int q = rx_ring - adapter->rx_ring;
 
-       if (rx_ring->cpu != cpu) {
-               dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
-               if (hw->mac.type == e1000_82576) {
-                       dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
-                       dca_rxctrl |= dca_get_tag(cpu) <<
-                                     E1000_DCA_RXCTRL_CPUID_SHIFT;
+       if (q_vector->cpu == cpu)
+               goto out_no_update;
+
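+       /* the vector moved to a new CPU: retarget the DCA (direct cache
+        * access) hints for its Tx and Rx rings */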
+       if (q_vector->tx_ring) {
+               int q = q_vector->tx_ring->reg_idx;
+               u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
+               if (hw->mac.type == e1000_82575) {
+                       dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
+                       dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                } else {
+                       dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
+                       dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+                                     E1000_DCA_TXCTRL_CPUID_SHIFT;
+               }
+               dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
+               wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
+       }
+       if (q_vector->rx_ring) {
+               int q = q_vector->rx_ring->reg_idx;
+               u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
+               if (hw->mac.type == e1000_82575) {
                        dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
-                       dca_rxctrl |= dca_get_tag(cpu);
+                       dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               } else {
+                       dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
+                       dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+                                     E1000_DCA_RXCTRL_CPUID_SHIFT;
                }
                dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
                dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
                dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
                wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
-               rx_ring->cpu = cpu;
-       }
-       put_cpu();
-}
-
-static void igb_update_tx_dca(struct igb_ring *tx_ring)
-{
-       u32 dca_txctrl;
-       struct igb_adapter *adapter = tx_ring->adapter;
-       struct e1000_hw *hw = &adapter->hw;
-       int cpu = get_cpu();
-       int q = tx_ring - adapter->tx_ring;
-
-       if (tx_ring->cpu != cpu) {
-               dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
-               if (hw->mac.type == e1000_82576) {
-                       dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
-                       dca_txctrl |= dca_get_tag(cpu) <<
-                                     E1000_DCA_TXCTRL_CPUID_SHIFT;
-               } else {
-                       dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
-                       dca_txctrl |= dca_get_tag(cpu);
-               }
-               dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
-               wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
-               tx_ring->cpu = cpu;
        }
+       q_vector->cpu = cpu;
+out_no_update:
        put_cpu();
 }
 
 static void igb_setup_dca(struct igb_adapter *adapter)
 {
+       struct e1000_hw *hw = &adapter->hw;
        int i;
 
        if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
                return;
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               adapter->tx_ring[i].cpu = -1;
-               igb_update_tx_dca(&adapter->tx_ring[i]);
-       }
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               adapter->rx_ring[i].cpu = -1;
-               igb_update_rx_dca(&adapter->rx_ring[i]);
+       /* Always use CB2 mode, difference is masked in the CB driver. */
+       wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
+
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               adapter->q_vector[i]->cpu = -1;
+               igb_update_dca(adapter->q_vector[i]);
        }
 }
 
@@ -3433,23 +4264,18 @@ static int __igb_notify_dca(struct device *dev, void *data)
 {
        struct net_device *netdev = dev_get_drvdata(dev);
        struct igb_adapter *adapter = netdev_priv(netdev);
+       struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
        unsigned long event = *(unsigned long *)data;
 
-       if (!(adapter->flags & IGB_FLAG_HAS_DCA))
-               goto out;
-
        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if already enabled, don't do it again */
                if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                        break;
-               adapter->flags |= IGB_FLAG_DCA_ENABLED;
-               /* Always use CB2 mode, difference is masked
-                * in the CB driver. */
-               wr32(E1000_DCA_CTRL, 2);
                if (dca_add_requester(dev) == 0) {
-                       dev_info(&adapter->pdev->dev, "DCA enabled\n");
+                       adapter->flags |= IGB_FLAG_DCA_ENABLED;
+                       dev_info(&pdev->dev, "DCA enabled\n");
                        igb_setup_dca(adapter);
                        break;
                }
@@ -3457,15 +4283,15 @@ static int __igb_notify_dca(struct device *dev, void *data)
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                        /* without this a class_device is left
-                        * hanging around in the sysfs model */
+                        * hanging around in the sysfs model */
                        dca_remove_requester(dev);
-                       dev_info(&adapter->pdev->dev, "DCA disabled\n");
+                       dev_info(&pdev->dev, "DCA disabled\n");
                        adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
-                       wr32(E1000_DCA_CTRL, 1);
+                       wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
                }
                break;
        }
-out:
+
        return 0;
 }
 
@@ -3481,6 +4307,503 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
 }
 #endif /* CONFIG_IGB_DCA */
 
+static void igb_ping_all_vfs(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 ping;
+       int i;
+
+       for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
+               ping = E1000_PF_CONTROL_MSG;
+               if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
+                       ping |= E1000_VT_MSGTYPE_CTS;
+               igb_write_mbx(hw, &ping, 1, i);
+       }
+}
+
+static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 vmolr = rd32(E1000_VMOLR(vf));
+       struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+
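+       /* start from a clean slate: clear the promisc flags and hash-match
+        * bits (ROPE = unicast hash, ROMPE = multicast hash, MPME = multicast
+        * promiscuous) before applying what the VF requested */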
+       vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
+                           IGB_VF_FLAG_MULTI_PROMISC);
+       vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+       if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
+               vmolr |= E1000_VMOLR_MPME;
+               *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
+       } else {
+               /*
+                * if we have hashes and we are clearing a multicast promisc
+                * flag we need to write the hashes to the MTA as this step
+                * was previously skipped
+                */
+               if (vf_data->num_vf_mc_hashes > 30) {
+                       vmolr |= E1000_VMOLR_MPME;
+               } else if (vf_data->num_vf_mc_hashes) {
+                       int j;
+                       vmolr |= E1000_VMOLR_ROMPE;
+                       for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+                               igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+               }
+       }
+
+       wr32(E1000_VMOLR(vf), vmolr);
+
+       /* there are flags left unprocessed, likely not supported */
+       if (*msgbuf & E1000_VT_MSGINFO_MASK)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int igb_set_vf_multicasts(struct igb_adapter *adapter,
+                                 u32 *msgbuf, u32 vf)
+{
+       int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+       u16 *hash_list = (u16 *)&msgbuf[1];
+       struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+       int i;
+
+       /* salt away the number of multicast addresses assigned
+        * to this VF so they can be restored when the PF multicast
+        * list changes
+        */
+       vf_data->num_vf_mc_hashes = n;
+
+       /* only up to 30 hash values supported */
+       if (n > 30)
+               n = 30;
+
+       /* store the hashes for later use */
+       for (i = 0; i < n; i++)
+               vf_data->vf_mc_hashes[i] = hash_list[i];
+
+       /* Flush and reset the mta with the new values */
+       igb_set_rx_mode(adapter->netdev);
+
+       return 0;
+}
+
+static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct vf_data_storage *vf_data;
+       int i, j;
+
+       for (i = 0; i < adapter->vfs_allocated_count; i++) {
+               u32 vmolr = rd32(E1000_VMOLR(i));
+               vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+               vf_data = &adapter->vf_data[i];
+
+               if ((vf_data->num_vf_mc_hashes > 30) ||
+                   (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
+                       vmolr |= E1000_VMOLR_MPME;
+               } else if (vf_data->num_vf_mc_hashes) {
+                       vmolr |= E1000_VMOLR_ROMPE;
+                       for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+                               igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+               }
+               wr32(E1000_VMOLR(i), vmolr);
+       }
+}
+
+static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 pool_mask, reg, vid;
+       int i;
+
+       pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+       /* Find the vlan filter for this id */
+       for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+               reg = rd32(E1000_VLVF(i));
+
+               /* remove the vf from the pool */
+               reg &= ~pool_mask;
+
+               /* if pool is empty then remove entry from vfta */
+               if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
+                   (reg & E1000_VLVF_VLANID_ENABLE)) {
+                       vid = reg & E1000_VLVF_VLANID_MASK;
+                       igb_vfta_set(hw, vid, false);
+                       reg = 0;
+               }
+
+               wr32(E1000_VLVF(i), reg);
+       }
+
+       adapter->vf_data[vf].vlans_enabled = 0;
+}
+
+static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 reg, i;
+
+       /* The vlvf table only exists on 82576 hardware and newer */
+       if (hw->mac.type < e1000_82576)
+               return -1;
+
+       /* we only need to do this if VMDq is enabled */
+       if (!adapter->vfs_allocated_count)
+               return -1;
+
+       /* Find the vlan filter for this id */
+       for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+               reg = rd32(E1000_VLVF(i));
+               if ((reg & E1000_VLVF_VLANID_ENABLE) &&
+                   vid == (reg & E1000_VLVF_VLANID_MASK))
+                       break;
+       }
+
+       if (add) {
+               if (i == E1000_VLVF_ARRAY_SIZE) {
+                       /* Did not find a matching VLAN ID entry that was
+                        * enabled.  Search for a free filter entry, i.e.
+                        * one without the enable bit set
+                        */
+                       for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+                               reg = rd32(E1000_VLVF(i));
+                               if (!(reg & E1000_VLVF_VLANID_ENABLE))
+                                       break;
+                       }
+               }
+               if (i < E1000_VLVF_ARRAY_SIZE) {
+                       /* Found an enabled/available entry */
+                       reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+                       /* if !enabled we need to set this up in vfta */
+                       if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
+                               /* add VID to filter table */
+                               igb_vfta_set(hw, vid, true);
+                               reg |= E1000_VLVF_VLANID_ENABLE;
+                       }
+                       reg &= ~E1000_VLVF_VLANID_MASK;
+                       reg |= vid;
+                       wr32(E1000_VLVF(i), reg);
+
+                       /* do not modify RLPML for PF devices */
+                       if (vf >= adapter->vfs_allocated_count)
+                               return 0;
+
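+                       /* first VLAN on this VF: grow its RLPML (max frame
+                        * size) by 4 bytes to leave room for the tag */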
+                       if (!adapter->vf_data[vf].vlans_enabled) {
+                               u32 size;
+                               reg = rd32(E1000_VMOLR(vf));
+                               size = reg & E1000_VMOLR_RLPML_MASK;
+                               size += 4;
+                               reg &= ~E1000_VMOLR_RLPML_MASK;
+                               reg |= size;
+                               wr32(E1000_VMOLR(vf), reg);
+                       }
+
+                       adapter->vf_data[vf].vlans_enabled++;
+                       return 0;
+               }
+       } else {
+               if (i < E1000_VLVF_ARRAY_SIZE) {
+                       /* remove vf from the pool */
+                       reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
+                       /* if pool is empty then remove entry from vfta */
+                       if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
+                               reg = 0;
+                               igb_vfta_set(hw, vid, false);
+                       }
+                       wr32(E1000_VLVF(i), reg);
+
+                       /* do not modify RLPML for PF devices */
+                       if (vf >= adapter->vfs_allocated_count)
+                               return 0;
+
+                       adapter->vf_data[vf].vlans_enabled--;
+                       if (!adapter->vf_data[vf].vlans_enabled) {
+                               u32 size;
+                               reg = rd32(E1000_VMOLR(vf));
+                               size = reg & E1000_VMOLR_RLPML_MASK;
+                               size -= 4;
+                               reg &= ~E1000_VMOLR_RLPML_MASK;
+                               reg |= size;
+                               wr32(E1000_VMOLR(vf), reg);
+                       }
+               }
+       }
+       return 0;
+}
+
+static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
+       if (vid)
+               wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
+       else
+               wr32(E1000_VMVIR(vf), 0);
+}
+
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+                              int vf, u16 vlan, u8 qos)
+{
+       int err = 0;
+       struct igb_adapter *adapter = netdev_priv(netdev);
+
+       if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
+               return -EINVAL;
+       if (vlan || qos) {
+               err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
+               if (err)
+                       goto out;
+               igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+               igb_set_vmolr(adapter, vf, !vlan);
+               adapter->vf_data[vf].pf_vlan = vlan;
+               adapter->vf_data[vf].pf_qos = qos;
+               dev_info(&adapter->pdev->dev,
+                        "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+               if (test_bit(__IGB_DOWN, &adapter->state)) {
+                       dev_warn(&adapter->pdev->dev,
+                                "The VF VLAN has been set,"
+                                " but the PF device is not up.\n");
+                       dev_warn(&adapter->pdev->dev,
+                                "Bring the PF device up before"
+                                " attempting to use the VF device.\n");
+               }
+       } else {
+               igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
+                                  false, vf);
+               igb_set_vmvir(adapter, vlan, vf);
+               igb_set_vmolr(adapter, vf, true);
+               adapter->vf_data[vf].pf_vlan = 0;
+               adapter->vf_data[vf].pf_qos = 0;
+       }
+out:
+       return err;
+}
+
+static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+       int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+       int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+
+       return igb_vlvf_set(adapter, vid, add, vf);
+}
+
+static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
+{
+       /* clear flags */
+       adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
+       adapter->vf_data[vf].last_nack = jiffies;
+
+       /* reset offloads to defaults */
+       igb_set_vmolr(adapter, vf, true);
+
+       /* reset vlans for device */
+       igb_clear_vf_vfta(adapter, vf);
+       if (adapter->vf_data[vf].pf_vlan)
+               igb_ndo_set_vf_vlan(adapter->netdev, vf,
+                                   adapter->vf_data[vf].pf_vlan,
+                                   adapter->vf_data[vf].pf_qos);
+       else
+               igb_clear_vf_vfta(adapter, vf);
+
+       /* reset multicast table array for vf */
+       adapter->vf_data[vf].num_vf_mc_hashes = 0;
+
+       /* Flush and reset the mta with the new values */
+       igb_set_rx_mode(adapter->netdev);
+}
+
+static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+{
+       unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+
+       /* generate a new mac address as we were hotplug removed/added */
+       if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
+               random_ether_addr(vf_mac);
+
+       /* process remaining reset events */
+       igb_vf_reset(adapter, vf);
+}
+
+static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+       int rar_entry = hw->mac.rar_entry_count - (vf + 1);
+       u32 reg, msgbuf[3];
+       u8 *addr = (u8 *)(&msgbuf[1]);
+
+       /* process all the same items cleared in a function level reset */
+       igb_vf_reset(adapter, vf);
+
+       /* set vf mac address */
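+       /* (VF MAC filters occupy the top of the RAR table, one entry per VF,
+        *  counting down from the last entry) */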
+       igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
+
+       /* enable transmit and receive for vf */
+       reg = rd32(E1000_VFTE);
+       wr32(E1000_VFTE, reg | (1 << vf));
+       reg = rd32(E1000_VFRE);
+       wr32(E1000_VFRE, reg | (1 << vf));
+
+       adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
+
+       /* reply to reset with ack and vf mac address */
+       msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+       memcpy(addr, vf_mac, 6);
+       igb_write_mbx(hw, msgbuf, 3, vf);
+}
+
+static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
+{
+       unsigned char *addr = (unsigned char *)&msg[1];
+       int err = -1;
+
+       if (is_valid_ether_addr(addr))
+               err = igb_set_vf_mac(adapter, vf, addr);
+
+       return err;
+}
+
+static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+       u32 msg = E1000_VT_MSGTYPE_NACK;
+
+       /* if device isn't clear to send it shouldn't be reading either */
+       if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
+           time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
+               igb_write_mbx(hw, &msg, 1, vf);
+               vf_data->last_nack = jiffies;
+       }
+}
+
+static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       u32 msgbuf[E1000_VFMAILBOX_SIZE];
+       struct e1000_hw *hw = &adapter->hw;
+       struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+       s32 retval;
+
+       retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
+
+       if (retval) {
+               /* if receive failed, revoke the VF's CTS status and restart init */
+               dev_err(&pdev->dev, "Error receiving message from VF\n");
+               vf_data->flags &= ~IGB_VF_FLAG_CTS;
+               if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
+                       return;
+               goto out;
+       }
+
+       /* this is a message we already processed, do nothing */
+       if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
+               return;
+
+       /*
+        * until the vf completes a reset it should not be
+        * allowed to start any configuration.
+        */
+
+       if (msgbuf[0] == E1000_VF_RESET) {
+               igb_vf_reset_msg(adapter, vf);
+               return;
+       }
+
+       if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
+               if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
+                       return;
+               retval = -1;
+               goto out;
+       }
+
+       switch ((msgbuf[0] & 0xFFFF)) {
+       case E1000_VF_SET_MAC_ADDR:
+               retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
+               break;
+       case E1000_VF_SET_PROMISC:
+               retval = igb_set_vf_promisc(adapter, msgbuf, vf);
+               break;
+       case E1000_VF_SET_MULTICAST:
+               retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
+               break;
+       case E1000_VF_SET_LPE:
+               retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
+               break;
+       case E1000_VF_SET_VLAN:
+               if (adapter->vf_data[vf].pf_vlan)
+                       retval = -1;
+               else
+                       retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+               break;
+       default:
+               dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
+               retval = -1;
+               break;
+       }
+
+       msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
+out:
+       /* notify the VF of the results of what it sent us */
+       if (retval)
+               msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
+       else
+               msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
+
+       igb_write_mbx(hw, msgbuf, 1, vf);
+}
+
+static void igb_msg_task(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 vf;
+
+       for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+               /* process any reset requests */
+               if (!igb_check_for_rst(hw, vf))
+                       igb_vf_reset_event(adapter, vf);
+
+               /* process any messages pending */
+               if (!igb_check_for_msg(hw, vf))
+                       igb_rcv_msg_from_vf(adapter, vf);
+
+               /* process any acks */
+               if (!igb_check_for_ack(hw, vf))
+                       igb_rcv_ack_from_vf(adapter, vf);
+       }
+}
+
+/**
+ *  igb_set_uta - Set unicast filter table address
+ *  @adapter: board private structure
+ *
+ *  The unicast table address is a register array of 32-bit registers.
+ *  The table is meant to be used in a way similar to how the MTA is used,
+ *  however due to certain limitations in the hardware it is necessary to
+ *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
+ **/
+static void igb_set_uta(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       int i;
+
+       /* The UTA table only exists on 82576 hardware and newer */
+       if (hw->mac.type < e1000_82576)
+               return;
+
+       /* we only need to do this if VMDq is enabled */
+       if (!adapter->vfs_allocated_count)
+               return;
+
+       for (i = 0; i < hw->mac.uta_reg_count; i++)
+               array_wr32(E1000_UTA, i, ~0);
+}
+
 /**
  * igb_intr_msi - Interrupt Handler
  * @irq: interrupt number
@@ -3488,13 +4811,21 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
  **/
 static irqreturn_t igb_intr_msi(int irq, void *data)
 {
-       struct net_device *netdev = data;
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct igb_adapter *adapter = data;
+       struct igb_q_vector *q_vector = adapter->q_vector[0];
        struct e1000_hw *hw = &adapter->hw;
        /* read ICR disables interrupts using IAM */
        u32 icr = rd32(E1000_ICR);
 
-       igb_write_itr(adapter->rx_ring);
+       igb_write_itr(q_vector);
+
+       if (icr & E1000_ICR_DRSTA)
+               schedule_work(&adapter->reset_task);
+
+       if (icr & E1000_ICR_DOUTSYNC) {
+               /* HW is reporting DMA is out of sync */
+               adapter->stats.doosync++;
+       }
 
        if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                hw->mac.get_link_status = 1;
@@ -3502,36 +4833,41 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+       napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
 }
 
 /**
- * igb_intr - Interrupt Handler
+ * igb_intr - Legacy Interrupt Handler
  * @irq: interrupt number
  * @data: pointer to a network interface device structure
  **/
 static irqreturn_t igb_intr(int irq, void *data)
 {
-       struct net_device *netdev = data;
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct igb_adapter *adapter = data;
+       struct igb_q_vector *q_vector = adapter->q_vector[0];
        struct e1000_hw *hw = &adapter->hw;
        /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
         * need for the IMC write */
        u32 icr = rd32(E1000_ICR);
-       u32 eicr = 0;
        if (!icr)
                return IRQ_NONE;  /* Not our interrupt */
 
-       igb_write_itr(adapter->rx_ring);
+       igb_write_itr(q_vector);
 
        /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
         * not set, then the adapter didn't send an interrupt */
        if (!(icr & E1000_ICR_INT_ASSERTED))
                return IRQ_NONE;
 
-       eicr = rd32(E1000_EICR);
+       if (icr & E1000_ICR_DRSTA)
+               schedule_work(&adapter->reset_task);
+
+       if (icr & E1000_ICR_DOUTSYNC) {
+               /* HW is reporting DMA is out of sync */
+               adapter->stats.doosync++;
+       }
 
        if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                hw->mac.get_link_status = 1;
@@ -3540,11 +4876,32 @@ static irqreturn_t igb_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+       napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
 }
 
+static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+{
+       struct igb_adapter *adapter = q_vector->adapter;
+       struct e1000_hw *hw = &adapter->hw;
+
+       if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
+           (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
+               if (!adapter->msix_entries)
+                       igb_set_itr(adapter);
+               else
+                       igb_update_ring_itr(q_vector);
+       }
+
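+       /* with MSI-X each vector re-arms only its own EIMS bit; in MSI or
+        * legacy mode the shared interrupt is re-enabled instead */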
+       if (!test_bit(__IGB_DOWN, &adapter->state)) {
+               if (adapter->msix_entries)
+                       wr32(E1000_EIMS, q_vector->eims_value);
+               else
+                       igb_irq_enable(adapter);
+       }
+}
+
 /**
  * igb_poll - NAPI Rx polling callback
  * @napi: napi polling structure
@@ -3552,105 +4909,119 @@ static irqreturn_t igb_intr(int irq, void *data)
  **/
 static int igb_poll(struct napi_struct *napi, int budget)
 {
-       struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
-       struct igb_adapter *adapter = rx_ring->adapter;
-       struct net_device *netdev = adapter->netdev;
-       int tx_clean_complete, work_done = 0;
+       struct igb_q_vector *q_vector = container_of(napi,
+                                                    struct igb_q_vector,
+                                                    napi);
+       int tx_clean_complete = 1, work_done = 0;
 
-       /* this poll routine only supports one tx and one rx queue */
 #ifdef CONFIG_IGB_DCA
-       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-               igb_update_tx_dca(&adapter->tx_ring[0]);
+       if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
+               igb_update_dca(q_vector);
 #endif
-       tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
+       if (q_vector->tx_ring)
+               tx_clean_complete = igb_clean_tx_irq(q_vector);
 
-#ifdef CONFIG_IGB_DCA
-       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-               igb_update_rx_dca(&adapter->rx_ring[0]);
-#endif
-       igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);
+       if (q_vector->rx_ring)
+               igb_clean_rx_irq_adv(q_vector, &work_done, budget);
 
-       /* If no Tx and not enough Rx work done, exit the polling mode */
-       if ((tx_clean_complete && (work_done < budget)) ||
-           !netif_running(netdev)) {
-               if (adapter->itr_setting & 3)
-                       igb_set_itr(adapter);
-               netif_rx_complete(netdev, napi);
-               if (!test_bit(__IGB_DOWN, &adapter->state))
-                       igb_irq_enable(adapter);
-               return 0;
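+       /* if Tx cleanup did not finish, claim the full budget so that NAPI
+        * polls this vector again */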
+       if (!tx_clean_complete)
+               work_done = budget;
+
+       /* If not enough Rx work done, exit the polling mode */
+       if (work_done < budget) {
+               napi_complete(napi);
+               igb_ring_irq_enable(q_vector);
        }
 
-       return 1;
+       return work_done;
 }
 
-static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
+/**
+ * igb_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @shhwtstamps: timestamp structure to update
+ * @regval: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions
+ */
+static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+                                   struct skb_shared_hwtstamps *shhwtstamps,
+                                   u64 regval)
 {
-       struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
-       struct igb_adapter *adapter = rx_ring->adapter;
-       struct e1000_hw *hw = &adapter->hw;
-       struct net_device *netdev = adapter->netdev;
-       int work_done = 0;
+       u64 ns;
 
-#ifdef CONFIG_IGB_DCA
-       if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-               igb_update_rx_dca(rx_ring);
-#endif
-       igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
-
-
-       /* If not enough Rx work done, exit the polling mode */
-       if ((work_done == 0) || !netif_running(netdev)) {
-               netif_rx_complete(netdev, napi);
-
-               if (adapter->itr_setting & 3) {
-                       if (adapter->num_rx_queues == 1)
-                               igb_set_itr(adapter);
-                       else
-                               igb_update_ring_itr(rx_ring);
-               }
+       /*
+        * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
+        * 24 to match the clock shift we set up earlier.
+        */
+       if (adapter->hw.mac.type == e1000_82580)
+               regval <<= IGB_82580_TSYNC_SHIFT;
+
+       ns = timecounter_cyc2time(&adapter->clock, regval);
+       timecompare_update(&adapter->compare, ns);
+       memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+       shhwtstamps->hwtstamp = ns_to_ktime(ns);
+       shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
+}
 
-               if (!test_bit(__IGB_DOWN, &adapter->state))
-                       wr32(E1000_EIMS, rx_ring->eims_value);
+/**
+ * igb_tx_hwtstamp - utility function which checks for TX time stamp
+ * @q_vector: pointer to q_vector containing needed info
+ * @skb: packet that was just sent
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we
+ * allow only one such packet into the queue.
+ */
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
+{
+       struct igb_adapter *adapter = q_vector->adapter;
+       union skb_shared_tx *shtx = skb_tx(skb);
+       struct e1000_hw *hw = &adapter->hw;
+       struct skb_shared_hwtstamps shhwtstamps;
+       u64 regval;
 
-               return 0;
-       }
+       /* exit if the skb did not request hw timestamping or no valid TX
+        * timestamp has been latched */
+       if (likely(!shtx->hardware) ||
+           !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
+               return;
 
-       return 1;
-}
+       regval = rd32(E1000_TXSTMPL);
+       regval |= (u64)rd32(E1000_TXSTMPH) << 32;
 
-static inline u32 get_head(struct igb_ring *tx_ring)
-{
-       void *end = (struct e1000_tx_desc *)tx_ring->desc + tx_ring->count;
-       return le32_to_cpu(*(volatile __le32 *)end);
+       igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+       skb_tstamp_tx(skb, &shhwtstamps);
 }
 
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: pointer to q_vector containing needed info
  * returns true if ring is completely cleaned
  **/
-static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
-       struct igb_adapter *adapter = tx_ring->adapter;
+       struct igb_adapter *adapter = q_vector->adapter;
+       struct igb_ring *tx_ring = q_vector->tx_ring;
+       struct net_device *netdev = tx_ring->netdev;
        struct e1000_hw *hw = &adapter->hw;
-       struct net_device *netdev = adapter->netdev;
-       struct e1000_tx_desc *tx_desc;
        struct igb_buffer *buffer_info;
        struct sk_buff *skb;
-       unsigned int i;
-       u32 head, oldhead;
-       unsigned int count = 0;
+       union e1000_adv_tx_desc *tx_desc, *eop_desc;
        unsigned int total_bytes = 0, total_packets = 0;
-       bool retval = true;
+       unsigned int i, eop, count = 0;
+       bool cleaned = false;
 
-       rmb();
-       head = get_head(tx_ring);
        i = tx_ring->next_to_clean;
-       while (1) {
-               while (i != head) {
-                       tx_desc = E1000_TX_DESC(*tx_ring, i);
+       eop = tx_ring->buffer_info[i].next_to_watch;
+       eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
+
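+       /* walk completed packets: next_to_watch points at the EOP descriptor
+        * whose DD bit tells us the hardware is done with the whole packet */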
+       while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+              (count < tx_ring->count)) {
+               for (cleaned = false; !cleaned; count++) {
+                       tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];
+                       cleaned = (i == eop);
                        skb = buffer_info->skb;
 
                        if (skb) {
@@ -3662,33 +5033,26 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
                                            skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
+
+                               igb_tx_hwtstamp(q_vector, skb);
                        }
 
-                       igb_unmap_and_free_tx_resource(adapter, buffer_info);
+                       igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+                       tx_desc->wb.status = 0;
 
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
-
-                       count++;
-                       if (count == IGB_MAX_TX_CLEAN) {
-                               retval = false;
-                               goto done_cleaning;
-                       }
                }
-               oldhead = head;
-               rmb();
-               head = get_head(tx_ring);
-               if (head == oldhead)
-                       goto done_cleaning;
-       }  /* while (1) */
-
-done_cleaning:
+               eop = tx_ring->buffer_info[i].next_to_watch;
+               eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
+       }
+
        tx_ring->next_to_clean = i;
 
        if (unlikely(count &&
                     netif_carrier_ok(netdev) &&
-                    IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+                    igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
@@ -3696,7 +5060,7 @@ done_cleaning:
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !(test_bit(__IGB_DOWN, &adapter->state))) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
-                       ++adapter->restart_queue;
+                       tx_ring->tx_stats.restart_queue++;
                }
        }
 
@@ -3706,33 +5070,31 @@ done_cleaning:
                tx_ring->detect_tx_hung = false;
                if (tx_ring->buffer_info[i].time_stamp &&
                    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
-                              (adapter->tx_timeout_factor * HZ))
-                   && !(rd32(E1000_STATUS) &
-                        E1000_STATUS_TXOFF)) {
+                              (adapter->tx_timeout_factor * HZ)) &&
+                   !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
 
-                       tx_desc = E1000_TX_DESC(*tx_ring, i);
                        /* detected Tx unit hang */
-                       dev_err(&adapter->pdev->dev,
+                       dev_err(&tx_ring->pdev->dev,
                                "Detected Tx Unit Hang\n"
                                "  Tx Queue             <%d>\n"
                                "  TDH                  <%x>\n"
                                "  TDT                  <%x>\n"
                                "  next_to_use          <%x>\n"
                                "  next_to_clean        <%x>\n"
-                               "  head (WB)            <%x>\n"
                                "buffer_info[next_to_clean]\n"
                                "  time_stamp           <%lx>\n"
+                               "  next_to_watch        <%x>\n"
                                "  jiffies              <%lx>\n"
                                "  desc.status          <%x>\n",
                                tx_ring->queue_index,
-                               readl(adapter->hw.hw_addr + tx_ring->head),
-                               readl(adapter->hw.hw_addr + tx_ring->tail),
+                               readl(tx_ring->head),
+                               readl(tx_ring->tail),
                                tx_ring->next_to_use,
                                tx_ring->next_to_clean,
-                               head,
-                               tx_ring->buffer_info[i].time_stamp,
+                               tx_ring->buffer_info[eop].time_stamp,
+                               eop,
                                jiffies,
-                               tx_desc->upper.fields.status);
+                               eop_desc->wb.status);
                        netif_stop_subqueue(netdev, tx_ring->queue_index);
                }
        }
@@ -3740,121 +5102,122 @@ done_cleaning:
        tx_ring->total_packets += total_packets;
        tx_ring->tx_stats.bytes += total_bytes;
        tx_ring->tx_stats.packets += total_packets;
-       adapter->net_stats.tx_bytes += total_bytes;
-       adapter->net_stats.tx_packets += total_packets;
-       return retval;
-}
-
-#ifdef CONFIG_IGB_LRO
- /**
- * igb_get_skb_hdr - helper function for LRO header processing
- * @skb: pointer to sk_buff to be added to LRO packet
- * @iphdr: pointer to ip header structure
- * @tcph: pointer to tcp header structure
- * @hdr_flags: pointer to header flags
- * @priv: pointer to the receive descriptor for the current sk_buff
- **/
-static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
-                           u64 *hdr_flags, void *priv)
-{
-       union e1000_adv_rx_desc *rx_desc = priv;
-       u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
-                      (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
-
-       /* Verify that this is a valid IPv4 TCP packet */
-       if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
-                         E1000_RXDADV_PKTTYPE_TCP))
-               return -1;
-
-       /* Set network headers */
-       skb_reset_network_header(skb);
-       skb_set_transport_header(skb, ip_hdrlen(skb));
-       *iphdr = ip_hdr(skb);
-       *tcph = tcp_hdr(skb);
-       *hdr_flags = LRO_IPV4 | LRO_TCP;
-
-       return 0;
-
+       return (count < tx_ring->count);
 }
-#endif /* CONFIG_IGB_LRO */
 
 /**
  * igb_receive_skb - helper function to handle rx indications
- * @ring: pointer to receive ring receving this packet 
- * @status: descriptor status field as written by hardware
- * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
- * @skb: pointer to sk_buff to be indicated to stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
  **/
-static void igb_receive_skb(struct igb_ring *ring, u8 status,
-                            union e1000_adv_rx_desc * rx_desc,
-                            struct sk_buff *skb)
-{
-       struct igb_adapter * adapter = ring->adapter;
-       bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
-
-#ifdef CONFIG_IGB_LRO
-       if (adapter->netdev->features & NETIF_F_LRO &&
-           skb->ip_summed == CHECKSUM_UNNECESSARY) {
-               if (vlan_extracted)
-                       lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
-                                          adapter->vlgrp,
-                                          le16_to_cpu(rx_desc->wb.upper.vlan),
-                                          rx_desc);
-               else
-                       lro_receive_skb(&ring->lro_mgr,skb, rx_desc);
-               ring->lro_used = 1;
-       } else {
-#endif
-               if (vlan_extracted)
-                       vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-                                         le16_to_cpu(rx_desc->wb.upper.vlan));
-               else
+static void igb_receive_skb(struct igb_q_vector *q_vector,
+                            struct sk_buff *skb,
+                            u16 vlan_tag)
+{
+       struct igb_adapter *adapter = q_vector->adapter;
 
-                       netif_receive_skb(skb);
-#ifdef CONFIG_IGB_LRO
-       }
-#endif
+       if (vlan_tag)
+               vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
+                                vlan_tag, skb);
+       else
+               napi_gro_receive(&q_vector->napi, skb);
 }
 
-
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
 {
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Ignore Checksum bit is set or checksum is disabled through ethtool */
-       if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
+       if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
+            (status_err & E1000_RXD_STAT_IXSM))
                return;
+
        /* TCP/UDP checksum error bit is set */
        if (status_err &
            (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
+               /*
+                * work around errata with sctp packets where the TCPE aka
+                * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
+                * packets, (aka let the stack check the crc32c)
+                */
+               if (!((skb->len == 60) &&
+                     (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)))
+                       ring->rx_stats.csum_err++;
+
                /* let the stack verify checksum errors */
-               adapter->hw_csum_err++;
                return;
-       }
-       /* It must be a TCP or UDP packet with a valid checksum */
-       if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       }
+       /* It must be a TCP or UDP packet with a valid checksum */
+       if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
+}
+
+static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+                                   struct sk_buff *skb)
+{
+       struct igb_adapter *adapter = q_vector->adapter;
+       struct e1000_hw *hw = &adapter->hw;
+       u64 regval;
+
+       /*
+        * If this bit is set, then the RX registers contain the time stamp. No
+        * other packet will be time stamped until we read these registers, so
+        * read the registers to make them available again. Because only one
+        * packet can be time stamped at a time, we know that the register
+        * values must belong to this one here and therefore we don't need to
+        * compare any of the additional attributes stored for it.
+        *
+        * If nothing went wrong, then it should have a skb_shared_tx that we
+        * can turn into a skb_shared_hwtstamps.
+        */
+       if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
+               return;
+       if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+               return;
 
-       adapter->hw_csum_good++;
+       regval = rd32(E1000_RXSTMPL);
+       regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+
+       igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
+                               union e1000_adv_rx_desc *rx_desc)
+{
+       /* HW will not DMA in data larger than the given buffer, even if it
+        * parses the (NFS, of course) header to be larger.  In that case, it
+        * fills the header buffer and spills the rest into the page.
+        */
+       u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
+                  E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
+       if (hlen > rx_ring->rx_buffer_len)
+               hlen = rx_ring->rx_buffer_len;
+       return hlen;
 }
 
-static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
-                                int *work_done, int budget)
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
+                                 int *work_done, int budget)
 {
-       struct igb_adapter *adapter = rx_ring->adapter;
-       struct net_device *netdev = adapter->netdev;
-       struct pci_dev *pdev = adapter->pdev;
+       struct igb_ring *rx_ring = q_vector->rx_ring;
+       struct net_device *netdev = rx_ring->netdev;
+       struct pci_dev *pdev = rx_ring->pdev;
        union e1000_adv_rx_desc *rx_desc, *next_rxd;
        struct igb_buffer *buffer_info, *next_buffer;
        struct sk_buff *skb;
-       unsigned int i;
-       u32 length, hlen, staterr;
        bool cleaned = false;
        int cleaned_count = 0;
+       int current_node = numa_node_id();
        unsigned int total_bytes = 0, total_packets = 0;
+       unsigned int i;
+       u32 staterr;
+       u16 length;
+       u16 vlan_tag;
 
        i = rx_ring->next_to_clean;
+       buffer_info = &rx_ring->buffer_info[i];
        rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 
@@ -3862,40 +5225,33 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
                if (*work_done >= budget)
                        break;
                (*work_done)++;
-               buffer_info = &rx_ring->buffer_info[i];
 
-               /* HW will not DMA in data larger than the given buffer, even
-                * if it parses the (NFS, of course) header to be larger.  In
-                * that case, it fills the header buffer and spills the rest
-                * into the page.
-                */
-               hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
-                 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-               if (hlen > adapter->rx_ps_hdr_size)
-                       hlen = adapter->rx_ps_hdr_size;
+               skb = buffer_info->skb;
+               prefetch(skb->data - NET_IP_ALIGN);
+               buffer_info->skb = NULL;
+
+               i++;
+               if (i == rx_ring->count)
+                       i = 0;
+
+               next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
+               prefetch(next_rxd);
+               next_buffer = &rx_ring->buffer_info[i];
 
                length = le16_to_cpu(rx_desc->wb.upper.length);
                cleaned = true;
                cleaned_count++;
 
-               skb = buffer_info->skb;
-               prefetch(skb->data - NET_IP_ALIGN);
-               buffer_info->skb = NULL;
-               if (!adapter->rx_ps_hdr_size) {
-                       pci_unmap_single(pdev, buffer_info->dma,
-                                        adapter->rx_buffer_len +
-                                          NET_IP_ALIGN,
-                                        PCI_DMA_FROMDEVICE);
-                       skb_put(skb, length);
-                       goto send_up;
-               }
-
-               if (!skb_shinfo(skb)->nr_frags) {
+               if (buffer_info->dma) {
                        pci_unmap_single(pdev, buffer_info->dma,
-                                        adapter->rx_ps_hdr_size +
-                                          NET_IP_ALIGN,
+                                        rx_ring->rx_buffer_len,
                                         PCI_DMA_FROMDEVICE);
-                       skb_put(skb, hlen);
+                       buffer_info->dma = 0;
+                       if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
+                               skb_put(skb, length);
+                               goto send_up;
+                       }
+                       skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
                }
 
                if (length) {
@@ -3908,24 +5264,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
                                                buffer_info->page_offset,
                                                length);
 
-                       if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
-                           (page_count(buffer_info->page) != 1))
+                       if ((page_count(buffer_info->page) != 1) ||
+                           (page_to_nid(buffer_info->page) != current_node))
                                buffer_info->page = NULL;
                        else
                                get_page(buffer_info->page);
 
                        skb->len += length;
                        skb->data_len += length;
-
                        skb->truesize += length;
                }
-send_up:
-               i++;
-               if (i == rx_ring->count)
-                       i = 0;
-               next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
-               prefetch(next_rxd);
-               next_buffer = &rx_ring->buffer_info[i];
 
                if (!(staterr & E1000_RXD_STAT_EOP)) {
                        buffer_info->skb = next_buffer->skb;
@@ -3934,20 +5282,25 @@ send_up:
                        next_buffer->dma = 0;
                        goto next_desc;
                }
-
+send_up:
                if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
 
+               igb_rx_hwtstamp(q_vector, staterr, skb);
                total_bytes += skb->len;
                total_packets++;
 
-               igb_rx_checksum_adv(adapter, staterr, skb);
+               igb_rx_checksum_adv(rx_ring, staterr, skb);
 
                skb->protocol = eth_type_trans(skb, netdev);
+               skb_record_rx_queue(skb, rx_ring->queue_index);
 
-               igb_receive_skb(rx_ring, staterr, rx_desc, skb);
+               vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
+                           le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
+
+               igb_receive_skb(q_vector, skb, vlan_tag);
 
 next_desc:
                rx_desc->wb.upper.status_error = 0;
@@ -3961,19 +5314,11 @@ next_desc:
                /* use prefetched values */
                rx_desc = next_rxd;
                buffer_info = next_buffer;
-
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
 
        rx_ring->next_to_clean = i;
-       cleaned_count = IGB_DESC_UNUSED(rx_ring);
-
-#ifdef CONFIG_IGB_LRO
-       if (rx_ring->lro_used) {
-               lro_flush_all(&rx_ring->lro_mgr);
-               rx_ring->lro_used = 0;
-       }
-#endif
+       cleaned_count = igb_desc_unused(rx_ring);
 
        if (cleaned_count)
                igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
@@ -3982,38 +5327,35 @@ next_desc:
        rx_ring->total_bytes += total_bytes;
        rx_ring->rx_stats.packets += total_packets;
        rx_ring->rx_stats.bytes += total_bytes;
-       adapter->net_stats.rx_bytes += total_bytes;
-       adapter->net_stats.rx_packets += total_packets;
        return cleaned;
 }
 
-
 /**
  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: pointer to the ring to repopulate with receive buffers
+ * @cleaned_count: number of buffers to replace
  **/
-static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
-                                    int cleaned_count)
+void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 {
-       struct igb_adapter *adapter = rx_ring->adapter;
-       struct net_device *netdev = adapter->netdev;
-       struct pci_dev *pdev = adapter->pdev;
+       struct net_device *netdev = rx_ring->netdev;
        union e1000_adv_rx_desc *rx_desc;
        struct igb_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
+       int bufsz;
 
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
 
+       bufsz = rx_ring->rx_buffer_len;
+
        while (cleaned_count--) {
                rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 
-               if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
+               if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
                        if (!buffer_info->page) {
-                               buffer_info->page = alloc_page(GFP_ATOMIC);
+                               buffer_info->page = netdev_alloc_page(netdev);
                                if (!buffer_info->page) {
-                                       adapter->alloc_rx_buff_failed++;
+                                       rx_ring->rx_stats.alloc_failed++;
                                        goto no_buffers;
                                }
                                buffer_info->page_offset = 0;
@@ -4021,49 +5363,48 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                                buffer_info->page_offset ^= PAGE_SIZE / 2;
                        }
                        buffer_info->page_dma =
-                               pci_map_page(pdev,
-                                            buffer_info->page,
+                               pci_map_page(rx_ring->pdev, buffer_info->page,
                                             buffer_info->page_offset,
                                             PAGE_SIZE / 2,
                                             PCI_DMA_FROMDEVICE);
+                       if (pci_dma_mapping_error(rx_ring->pdev,
+                                                 buffer_info->page_dma)) {
+                               buffer_info->page_dma = 0;
+                               rx_ring->rx_stats.alloc_failed++;
+                               goto no_buffers;
+                       }
                }
 
-               if (!buffer_info->skb) {
-                       int bufsz;
-
-                       if (adapter->rx_ps_hdr_size)
-                               bufsz = adapter->rx_ps_hdr_size;
-                       else
-                               bufsz = adapter->rx_buffer_len;
-                       bufsz += NET_IP_ALIGN;
-                       skb = netdev_alloc_skb(netdev, bufsz);
-
+               skb = buffer_info->skb;
+               if (!skb) {
+                       skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        if (!skb) {
-                               adapter->alloc_rx_buff_failed++;
+                               rx_ring->rx_stats.alloc_failed++;
                                goto no_buffers;
                        }
 
-                       /* Make buffer alignment 2 beyond a 16 byte boundary
-                        * this will result in a 16 byte aligned IP header after
-                        * the 14 byte MAC header is removed
-                        */
-                       skb_reserve(skb, NET_IP_ALIGN);
-
                        buffer_info->skb = skb;
-                       buffer_info->dma = pci_map_single(pdev, skb->data,
+               }
+               if (!buffer_info->dma) {
+                       buffer_info->dma = pci_map_single(rx_ring->pdev,
+                                                         skb->data,
                                                          bufsz,
                                                          PCI_DMA_FROMDEVICE);
-
+                       if (pci_dma_mapping_error(rx_ring->pdev,
+                                                 buffer_info->dma)) {
+                               buffer_info->dma = 0;
+                               rx_ring->rx_stats.alloc_failed++;
+                               goto no_buffers;
+                       }
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
-               if (adapter->rx_ps_hdr_size) {
+               if (bufsz < IGB_RXBUFFER_1024) {
                        rx_desc->read.pkt_addr =
                             cpu_to_le64(buffer_info->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
                } else {
-                       rx_desc->read.pkt_addr =
-                            cpu_to_le64(buffer_info->dma);
+                       rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
                        rx_desc->read.hdr_addr = 0;
                }
 
@@ -4086,7 +5427,7 @@ no_buffers:
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64). */
                wmb();
-               writel(i, adapter->hw.hw_addr + rx_ring->tail);
+               writel(i, rx_ring->tail);
        }
 }
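
The loop above is the classic descriptor-ring producer pattern: fill entries, wrap next_to_use at the ring size, and publish the new index to the tail register only after a write barrier, so the NIC never sees a descriptor before its buffer addresses are visible. Below is a small model of the index arithmetic under an assumed ring size; the barrier and MMIO write are kernel-only and appear as a comment.

#include <stdio.h>

#define RING_COUNT 256  /* illustrative ring size */

/* Advance the producer index for 'n' newly filled descriptors and
 * return the value that would be written to the tail register. */
static unsigned int advance_tail(unsigned int next_to_use, unsigned int n)
{
        while (n--) {
                /* ...fill rx_desc[next_to_use] here... */
                next_to_use++;
                if (next_to_use == RING_COUNT)
                        next_to_use = 0;
        }
        /* in the driver: wmb(); writel(next_to_use, rx_ring->tail); */
        return next_to_use;
}

int main(void)
{
        printf("new tail = %u\n", advance_tail(250, 10)); /* wraps to 4 */
        return 0;
}
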
 
@@ -4109,11 +5450,8 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                data->phy_id = adapter->hw.phy.addr;
                break;
        case SIOCGMIIREG:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               if (adapter->hw.phy.ops.read_phy_reg(&adapter->hw,
-                                                    data->reg_num
-                                                    & 0x1F, &data->val_out))
+               if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+                                    &data->val_out))
                        return -EIO;
                break;
        case SIOCSMIIREG:
@@ -4124,6 +5462,170 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 }
 
 /**
+ * igb_hwtstamp_ioctl - control hardware time stamping
+ * @netdev: network interface device structure
+ * @ifr: pointer to interface request structure
+ * @cmd: ioctl command
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported; in particular, the event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * layer 2 or 4".
+ *
+ **/
+static int igb_hwtstamp_ioctl(struct net_device *netdev,
+                             struct ifreq *ifr, int cmd)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       struct hwtstamp_config config;
+       u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+       u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+       u32 tsync_rx_cfg = 0;
+       bool is_l4 = false;
+       bool is_l2 = false;
+       u32 regval;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (config.flags)
+               return -EINVAL;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               tsync_tx_ctl = 0;
+               /* fall through */
+       case HWTSTAMP_TX_ON:
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               tsync_rx_ctl = 0;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_ALL:
+               /*
+                * register TSYNCRXCFG must be set, therefore it is not
+                * possible to time stamp both Sync and Delay_Req messages
+                * => fall back to time stamping all packets
+                */
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+               is_l4 = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+               is_l4 = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
+               is_l2 = true;
+               is_l4 = true;
+               config.rx_filter = HWTSTAMP_FILTER_SOME;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
+               is_l2 = true;
+               is_l4 = true;
+               config.rx_filter = HWTSTAMP_FILTER_SOME;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+               config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               is_l2 = true;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       if (hw->mac.type == e1000_82575) {
+               if (tsync_rx_ctl | tsync_tx_ctl)
+                       return -EINVAL;
+               return 0;
+       }
+
+       /* enable/disable TX */
+       regval = rd32(E1000_TSYNCTXCTL);
+       regval &= ~E1000_TSYNCTXCTL_ENABLED;
+       regval |= tsync_tx_ctl;
+       wr32(E1000_TSYNCTXCTL, regval);
+
+       /* enable/disable RX */
+       regval = rd32(E1000_TSYNCRXCTL);
+       regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+       regval |= tsync_rx_ctl;
+       wr32(E1000_TSYNCRXCTL, regval);
+
+       /* define which PTP packets are time stamped */
+       wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
+
+       /* define ethertype filter for timestamped packets */
+       if (is_l2)
+               wr32(E1000_ETQF(3),
+                               (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+                                E1000_ETQF_1588 | /* enable timestamping */
+                                ETH_P_1588));     /* 1588 eth protocol type */
+       else
+               wr32(E1000_ETQF(3), 0);
+
+#define PTP_PORT 319
+       /* L4 Queue Filter[3]: filter by destination port and protocol */
+       if (is_l4) {
+               u32 ftqf = (IPPROTO_UDP /* UDP */
+                       | E1000_FTQF_VF_BP /* VF not compared */
+                       | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+                       | E1000_FTQF_MASK); /* mask all inputs */
+               ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+               wr32(E1000_IMIR(3), htons(PTP_PORT));
+               wr32(E1000_IMIREXT(3),
+                    (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+               if (hw->mac.type == e1000_82576) {
+                       /* enable source port check */
+                       wr32(E1000_SPQF(3), htons(PTP_PORT));
+                       ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+               }
+               wr32(E1000_FTQF(3), ftqf);
+       } else {
+               wr32(E1000_FTQF(3), E1000_FTQF_MASK);
+       }
+       wrfl();
+
+       adapter->hwtstamp_config = config;
+
+       /* clear TX/RX time stamp registers, just to be sure */
+       regval = rd32(E1000_TXSTMPH);
+       regval = rd32(E1000_RXSTMPH);
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+               -EFAULT : 0;
+}
+
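
From userspace, this handler is exercised through the standard SIOCSHWTSTAMP ioctl. A minimal sketch follows; "eth0" is a placeholder interface name. Note that the driver may widen the requested filter (e.g. falling back to HWTSTAMP_FILTER_ALL) and copies the adjusted config back, so callers should inspect the structure after the call.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
                perror("SIOCSHWTSTAMP");
                close(fd);
                return 1;
        }

        /* the driver reports the filter it actually programmed */
        printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
        close(fd);
        return 0;
}
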
+/**
- * igb_ioctl -
- * @netdev:
- * @ifreq:
+ * igb_ioctl - handle device-specific ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: pointer to interface request structure
@@ -4136,11 +5638,41 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return igb_mii_ioctl(netdev, ifr, cmd);
+       case SIOCSHWTSTAMP:
+               return igb_hwtstamp_ioctl(netdev, ifr, cmd);
        default:
                return -EOPNOTSUPP;
        }
 }
 
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       struct igb_adapter *adapter = hw->back;
+       u16 cap_offset;
+
+       cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+       if (!cap_offset)
+               return -E1000_ERR_CONFIG;
+
+       pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+       return 0;
+}
+
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       struct igb_adapter *adapter = hw->back;
+       u16 cap_offset;
+
+       cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+       if (!cap_offset)
+               return -E1000_ERR_CONFIG;
+
+       pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
+
+       return 0;
+}
+
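
Both helpers above rely on pci_find_capability() to locate the PCI Express capability in config space. For reference, here is a simplified userspace walk of the same capability list; the sysfs path is a placeholder, and the kernel version additionally checks the status register's capability-list bit and guards against malformed loops.

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

#define PCI_CAPABILITY_LIST 0x34        /* pointer to first capability */
#define PCI_CAP_ID_EXP      0x10        /* PCI Express capability id */

/* Read one byte of config space; returns -1 on error. */
static int cfg_byte(int fd, int off)
{
        uint8_t b;

        if (pread(fd, &b, 1, off) != 1)
                return -1;
        return b;
}

int main(void)
{
        /* placeholder device path */
        int fd = open("/sys/bus/pci/devices/0000:01:00.0/config", O_RDONLY);
        int pos;

        if (fd < 0) {
                perror("open");
                return 1;
        }

        pos = cfg_byte(fd, PCI_CAPABILITY_LIST);
        while (pos > 0) {
                pos &= 0xfc;            /* capability records are dword aligned */
                if (!pos)
                        break;
                if (cfg_byte(fd, pos) == PCI_CAP_ID_EXP) {
                        printf("PCIe capability at 0x%02x\n", pos);
                        break;
                }
                pos = cfg_byte(fd, pos + 1);    /* follow the next pointer */
        }

        close(fd);
        return 0;
}
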
 static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
 {
@@ -4157,27 +5689,19 @@ static void igb_vlan_rx_register(struct net_device *netdev,
                ctrl |= E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);
 
-               /* enable VLAN receive filtering */
+               /* Disable CFI check */
                rctl = rd32(E1000_RCTL);
                rctl &= ~E1000_RCTL_CFIEN;
                wr32(E1000_RCTL, rctl);
-               igb_update_mng_vlan(adapter);
-               wr32(E1000_RLPML,
-                               adapter->max_frame_size + VLAN_TAG_SIZE);
        } else {
                /* disable VLAN tag insert/strip */
                ctrl = rd32(E1000_CTRL);
                ctrl &= ~E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);
-
-               if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
-                       igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-                       adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
-               }
-               wr32(E1000_RLPML,
-                               adapter->max_frame_size);
        }
 
+       igb_rlpml_set(adapter);
+
        if (!test_bit(__IGB_DOWN, &adapter->state))
                igb_irq_enable(adapter);
 }
@@ -4186,24 +5710,21 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       u32 vfta, index;
+       int pf_id = adapter->vfs_allocated_count;
 
-       if ((adapter->hw.mng_cookie.status &
-            E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-           (vid == adapter->mng_vlan_id))
-               return;
-       /* add VID to filter table */
-       index = (vid >> 5) & 0x7F;
-       vfta = array_rd32(E1000_VFTA, index);
-       vfta |= (1 << (vid & 0x1F));
-       igb_write_vfta(&adapter->hw, index, vfta);
+       /* attempt to add filter to vlvf array */
+       igb_vlvf_set(adapter, vid, true, pf_id);
+
+       /* add the filter since PF can receive vlans w/o entry in vlvf */
+       igb_vfta_set(hw, vid, true);
 }
 
 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       u32 vfta, index;
+       int pf_id = adapter->vfs_allocated_count;
+       s32 err;
 
        igb_irq_disable(adapter);
        vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -4211,19 +5732,12 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        if (!test_bit(__IGB_DOWN, &adapter->state))
                igb_irq_enable(adapter);
 
-       if ((adapter->hw.mng_cookie.status &
-            E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-           (vid == adapter->mng_vlan_id)) {
-               /* release control to f/w */
-               igb_release_hw_control(adapter);
-               return;
-       }
+       /* remove vlan from VLVF table array */
+       err = igb_vlvf_set(adapter, vid, false, pf_id);
 
-       /* remove VID from filter table */
-       index = (vid >> 5) & 0x7F;
-       vfta = array_rd32(E1000_VFTA, index);
-       vfta &= ~(1 << (vid & 0x1F));
-       igb_write_vfta(&adapter->hw, index, vfta);
+       /* if vid was not present in VLVF just remove it from table */
+       if (err)
+               igb_vfta_set(hw, vid, false);
 }
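
The open-coded VFTA update that this hunk removes treats the filter table as a 4096-bit bitmap spread across 128 32-bit registers, indexed by vid >> 5 with the bit selected by vid & 0x1F; igb_vfta_set() wraps the same math. A standalone model of that index/bit computation:

#include <stdint.h>
#include <stdio.h>

#define VFTA_ENTRIES 128        /* 128 x 32 bits = 4096 VLAN ids */

static uint32_t vfta[VFTA_ENTRIES];

/* Set or clear the filter-table bit for a VLAN id, mirroring the
 * index/bit math from the removed open-coded version. */
static void vfta_set(uint16_t vid, int enable)
{
        uint32_t index = (vid >> 5) & 0x7f;
        uint32_t mask = 1u << (vid & 0x1f);

        if (enable)
                vfta[index] |= mask;
        else
                vfta[index] &= ~mask;
}

int main(void)
{
        vfta_set(100, 1);
        printf("vid 100 -> VFTA[%u] bit %u\n", 100u >> 5, 100u & 0x1f);
        /* vid 100 -> VFTA[3] bit 4 */
        return 0;
}
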
 
 static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -4242,18 +5756,11 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
 
 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
 {
+       struct pci_dev *pdev = adapter->pdev;
        struct e1000_mac_info *mac = &adapter->hw.mac;
 
        mac->autoneg = 0;
 
-       /* Fiber NICs only allow 1000 gbps Full duplex */
-       if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
-               spddplx != (SPEED_1000 + DUPLEX_FULL)) {
-               dev_err(&adapter->pdev->dev,
-                       "Unsupported Speed/Duplex configuration\n");
-               return -EINVAL;
-       }
-
        switch (spddplx) {
        case SPEED_10 + DUPLEX_HALF:
                mac->forced_speed_duplex = ADVERTISE_10_HALF;
@@ -4273,15 +5780,13 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
                break;
        case SPEED_1000 + DUPLEX_HALF: /* not supported */
        default:
-               dev_err(&adapter->pdev->dev,
-                       "Unsupported Speed/Duplex configuration\n");
+               dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
                return -EINVAL;
        }
        return 0;
 }
 
-
-static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
@@ -4297,9 +5802,7 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
        if (netif_running(netdev))
                igb_close(netdev);
 
-       igb_reset_interrupt_capability(adapter);
-
-       igb_free_queues(adapter);
+       igb_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -4313,7 +5816,7 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 
        if (wufc) {
                igb_setup_rctl(adapter);
-               igb_set_multi(netdev);
+               igb_set_rx_mode(netdev);
 
                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & E1000_WUFC_MC) {
@@ -4331,7 +5834,7 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
                wr32(E1000_CTRL, ctrl);
 
                /* Allow time for pending master requests to run */
-               igb_disable_pcie_master(&adapter->hw);
+               igb_disable_pcie_master(hw);
 
                wr32(E1000_WUC, E1000_WUC_PME_EN);
                wr32(E1000_WUFC, wufc);
@@ -4340,15 +5843,11 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
                wr32(E1000_WUFC, 0);
        }
 
-       /* make sure adapter isn't asleep if manageability/wol is enabled */
-       if (wufc || adapter->en_mng_pt) {
-               pci_enable_wake(pdev, PCI_D3hot, 1);
-               pci_enable_wake(pdev, PCI_D3cold, 1);
-       } else {
-               igb_shutdown_fiber_serdes_link_82575(hw);
-               pci_enable_wake(pdev, PCI_D3hot, 0);
-               pci_enable_wake(pdev, PCI_D3cold, 0);
-       }
+       *enable_wake = wufc || adapter->en_mng_pt;
+       if (!*enable_wake)
+               igb_power_down_link(adapter);
+       else
+               igb_power_up_link(adapter);
 
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
@@ -4356,12 +5855,29 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 
        pci_disable_device(pdev);
 
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
        return 0;
 }
 
 #ifdef CONFIG_PM
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       int retval;
+       bool wake;
+
+       retval = __igb_shutdown(pdev, &wake);
+       if (retval)
+               return retval;
+
+       if (wake) {
+               pci_prepare_to_sleep(pdev);
+       } else {
+               pci_wake_from_d3(pdev, false);
+               pci_set_power_state(pdev, PCI_D3hot);
+       }
+
+       return 0;
+}
+
 static int igb_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4371,11 +5887,9 @@ static int igb_resume(struct pci_dev *pdev)
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
+       pci_save_state(pdev);
 
-       if (adapter->need_ioport)
-               err = pci_enable_device(pdev);
-       else
-               err = pci_enable_device_mem(pdev);
+       err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "igb: Cannot enable PCI device from suspend\n");
@@ -4386,16 +5900,17 @@ static int igb_resume(struct pci_dev *pdev)
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
 
-       igb_set_interrupt_capability(adapter);
-
-       if (igb_alloc_queues(adapter)) {
+       if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }
 
-       /* e1000_power_up_phy(adapter); */
-
        igb_reset(adapter);
+
+       /* let the f/w know that the h/w is now under the control of the
+        * driver. */
+       igb_get_hw_control(adapter);
+
        wr32(E1000_WUS, ~0);
 
        if (netif_running(netdev)) {
@@ -4406,17 +5921,20 @@ static int igb_resume(struct pci_dev *pdev)
 
        netif_device_attach(netdev);
 
-       /* let the f/w know that the h/w is now under the control of the
-        * driver. */
-       igb_get_hw_control(adapter);
-
        return 0;
 }
 #endif
 
 static void igb_shutdown(struct pci_dev *pdev)
 {
-       igb_suspend(pdev, PMSG_SUSPEND);
+       bool wake;
+
+       __igb_shutdown(pdev, &wake);
+
+       if (system_state == SYSTEM_POWER_OFF) {
+               pci_wake_from_d3(pdev, wake);
+               pci_set_power_state(pdev, PCI_D3hot);
+       }
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4428,22 +5946,21 @@ static void igb_shutdown(struct pci_dev *pdev)
 static void igb_netpoll(struct net_device *netdev)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
        int i;
-       int work_done = 0;
-
-       igb_irq_disable(adapter);
-       adapter->flags |= IGB_FLAG_IN_NETPOLL;
-
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               igb_clean_tx_irq(&adapter->tx_ring[i]);
 
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_clean_rx_irq_adv(&adapter->rx_ring[i],
-                                    &work_done,
-                                    adapter->rx_ring[i].napi.weight);
+       if (!adapter->msix_entries) {
+               struct igb_q_vector *q_vector = adapter->q_vector[0];
+               igb_irq_disable(adapter);
+               napi_schedule(&q_vector->napi);
+               return;
+       }
 
-       adapter->flags &= ~IGB_FLAG_IN_NETPOLL;
-       igb_irq_enable(adapter);
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               struct igb_q_vector *q_vector = adapter->q_vector[i];
+               wr32(E1000_EIMC, q_vector->eims_value);
+               napi_schedule(&q_vector->napi);
+       }
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
@@ -4463,6 +5980,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
 
        netif_device_detach(netdev);
 
+       if (state == pci_channel_io_perm_failure)
+               return PCI_ERS_RESULT_DISCONNECT;
+
        if (netif_running(netdev))
                igb_down(adapter);
        pci_disable_device(pdev);
@@ -4483,27 +6003,34 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       pci_ers_result_t result;
        int err;
 
-       if (adapter->need_ioport)
-               err = pci_enable_device(pdev);
-       else
-               err = pci_enable_device_mem(pdev);
-       if (err) {
+       if (pci_enable_device_mem(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
-               return PCI_ERS_RESULT_DISCONNECT;
-       }
-       pci_set_master(pdev);
-       pci_restore_state(pdev);
+               result = PCI_ERS_RESULT_DISCONNECT;
+       } else {
+               pci_set_master(pdev);
+               pci_restore_state(pdev);
+               pci_save_state(pdev);
 
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_enable_wake(pdev, PCI_D3cold, 0);
+               pci_enable_wake(pdev, PCI_D3hot, 0);
+               pci_enable_wake(pdev, PCI_D3cold, 0);
 
-       igb_reset(adapter);
-       wr32(E1000_WUS, ~0);
+               igb_reset(adapter);
+               wr32(E1000_WUS, ~0);
+               result = PCI_ERS_RESULT_RECOVERED;
+       }
+
+       err = pci_cleanup_aer_uncorrect_error_status(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
+                       "failed 0x%0x\n", err);
+               /* non-fatal, continue */
+       }
 
-       return PCI_ERS_RESULT_RECOVERED;
+       return result;
 }
 
 /**
@@ -4531,7 +6058,113 @@ static void igb_io_resume(struct pci_dev *pdev)
        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);
+}
+
+static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
+                             u8 qsel)
+{
+       u32 rar_low, rar_high;
+       struct e1000_hw *hw = &adapter->hw;
+
+       /* HW expects these in little endian so we reverse the byte order
+        * from network order (big endian) to little endian
+        */
+       rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+                 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+       rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+       /* Indicate to hardware the Address is Valid. */
+       rar_high |= E1000_RAH_AV;
+
+       if (hw->mac.type == e1000_82575)
+               rar_high |= E1000_RAH_POOL_1 * qsel;
+       else
+               rar_high |= E1000_RAH_POOL_1 << qsel;
+
+       wr32(E1000_RAL(index), rar_low);
+       wrfl();
+       wr32(E1000_RAH(index), rar_high);
+       wrfl();
+}
+
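
A standalone model of the RAL/RAH packing performed by igb_rar_set_qsel(): the first four MAC bytes fill RAL, the last two fill the low half of RAH, and the Address Valid bit is set. The RAH_AV value below is an assumption mirroring E1000_RAH_AV (bit 31), and the pool/qsel field is omitted; the MAC is a sample.

#include <stdint.h>
#include <stdio.h>

#define RAH_AV 0x80000000u      /* assumed Address Valid bit (E1000_RAH_AV) */

/* Pack a 6-byte MAC into the RAL/RAH register pair as the driver does. */
static void rar_pack(const uint8_t *addr, uint32_t *ral, uint32_t *rah)
{
        *ral = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
               ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
        *rah = ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8)) | RAH_AV;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };
        uint32_t ral, rah;

        rar_pack(mac, &ral, &rah);
        printf("RAL=0x%08x RAH=0x%08x\n", ral, rah);
        /* RAL=0xab211b00 RAH=0x8000efcd */
        return 0;
}
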
+static int igb_set_vf_mac(struct igb_adapter *adapter,
+                          int vf, unsigned char *mac_addr)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       /* VF MAC addresses start at the end of the receive addresses and
+        * move towards the first, so a collision should not be possible */
+       int rar_entry = hw->mac.rar_entry_count - (vf + 1);
+
+       memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
+
+       igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
+
+       return 0;
+}
+
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+               return -EINVAL;
+       adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+       dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+       dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+                                     " change effective.");
+       if (test_bit(__IGB_DOWN, &adapter->state)) {
+               dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+                        " but the PF device is not up.\n");
+               dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+                        " attempting to use the VF device.\n");
+       }
+       return igb_set_vf_mac(adapter, vf, mac);
+}
+
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+       return -EOPNOTSUPP;
+}
+
+static int igb_ndo_get_vf_config(struct net_device *netdev,
+                                int vf, struct ifla_vf_info *ivi)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       if (vf >= adapter->vfs_allocated_count)
+               return -EINVAL;
+       ivi->vf = vf;
+       memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
+       ivi->tx_rate = 0;
+       ivi->vlan = adapter->vf_data[vf].pf_vlan;
+       ivi->qos = adapter->vf_data[vf].pf_qos;
+       return 0;
+}
+
+static void igb_vmm_control(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 reg;
+
+       /* replication is not supported for 82575 */
+       if (hw->mac.type == e1000_82575)
+               return;
 
+       /* enable replication vlan tag stripping */
+       reg = rd32(E1000_RPLOLR);
+       reg |= E1000_RPLOLR_STRVLAN;
+       wr32(E1000_RPLOLR, reg);
+
+       /* notify HW that the MAC is adding vlan tags */
+       reg = rd32(E1000_DTXCTL);
+       reg |= E1000_DTXCTL_VLAN_ADDED;
+       wr32(E1000_DTXCTL, reg);
+
+       if (adapter->vfs_allocated_count) {
+               igb_vmdq_set_loopback_pf(hw, true);
+               igb_vmdq_set_replication_pf(hw, true);
+       } else {
+               igb_vmdq_set_loopback_pf(hw, false);
+               igb_vmdq_set_replication_pf(hw, false);
+       }
 }
 
 /* igb_main.c */