/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
#include "igb.h"
int stat_offset;
};
-#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \
- offsetof(struct igb_adapter, m)
+#define IGB_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
+ .stat_offset = offsetof(struct igb_adapter, _stat) \
+}
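+/*
+ * For reference, IGB_STAT("rx_packets", stats.gprc) expands to the
+ * designated initializer
+ *   { .stat_string = "rx_packets",
+ *     .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, stats.gprc),
+ *     .stat_offset = offsetof(struct igb_adapter, stats.gprc) }
+ */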
static const struct igb_stats igb_gstrings_stats[] = {
- { "rx_packets", IGB_STAT(stats.gprc) },
- { "tx_packets", IGB_STAT(stats.gptc) },
- { "rx_bytes", IGB_STAT(stats.gorc) },
- { "tx_bytes", IGB_STAT(stats.gotc) },
- { "rx_broadcast", IGB_STAT(stats.bprc) },
- { "tx_broadcast", IGB_STAT(stats.bptc) },
- { "rx_multicast", IGB_STAT(stats.mprc) },
- { "tx_multicast", IGB_STAT(stats.mptc) },
- { "rx_errors", IGB_STAT(net_stats.rx_errors) },
- { "tx_errors", IGB_STAT(net_stats.tx_errors) },
- { "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
- { "multicast", IGB_STAT(stats.mprc) },
- { "collisions", IGB_STAT(stats.colc) },
- { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
- { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
- { "rx_crc_errors", IGB_STAT(stats.crcerrs) },
- { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
- { "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
- { "rx_missed_errors", IGB_STAT(stats.mpc) },
- { "tx_aborted_errors", IGB_STAT(stats.ecol) },
- { "tx_carrier_errors", IGB_STAT(stats.tncrs) },
- { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
- { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
- { "tx_window_errors", IGB_STAT(stats.latecol) },
- { "tx_abort_late_coll", IGB_STAT(stats.latecol) },
- { "tx_deferred_ok", IGB_STAT(stats.dc) },
- { "tx_single_coll_ok", IGB_STAT(stats.scc) },
- { "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
- { "tx_timeout_count", IGB_STAT(tx_timeout_count) },
- { "tx_restart_queue", IGB_STAT(restart_queue) },
- { "rx_long_length_errors", IGB_STAT(stats.roc) },
- { "rx_short_length_errors", IGB_STAT(stats.ruc) },
- { "rx_align_errors", IGB_STAT(stats.algnerrc) },
- { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
- { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
- { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
- { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
- { "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
- { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
- { "rx_long_byte_count", IGB_STAT(stats.gorc) },
- { "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
- { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
- { "rx_header_split", IGB_STAT(rx_hdr_split) },
- { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
- { "tx_smbus", IGB_STAT(stats.mgptc) },
- { "rx_smbus", IGB_STAT(stats.mgprc) },
- { "dropped_smbus", IGB_STAT(stats.mgpdc) },
-#ifdef CONFIG_IGB_LRO
- { "lro_aggregated", IGB_STAT(lro_aggregated) },
- { "lro_flushed", IGB_STAT(lro_flushed) },
- { "lro_no_desc", IGB_STAT(lro_no_desc) },
-#endif
+ IGB_STAT("rx_packets", stats.gprc),
+ IGB_STAT("tx_packets", stats.gptc),
+ IGB_STAT("rx_bytes", stats.gorc),
+ IGB_STAT("tx_bytes", stats.gotc),
+ IGB_STAT("rx_broadcast", stats.bprc),
+ IGB_STAT("tx_broadcast", stats.bptc),
+ IGB_STAT("rx_multicast", stats.mprc),
+ IGB_STAT("tx_multicast", stats.mptc),
+ IGB_STAT("multicast", stats.mprc),
+ IGB_STAT("collisions", stats.colc),
+ IGB_STAT("rx_crc_errors", stats.crcerrs),
+ IGB_STAT("rx_no_buffer_count", stats.rnbc),
+ IGB_STAT("rx_missed_errors", stats.mpc),
+ IGB_STAT("tx_aborted_errors", stats.ecol),
+ IGB_STAT("tx_carrier_errors", stats.tncrs),
+ IGB_STAT("tx_window_errors", stats.latecol),
+ IGB_STAT("tx_abort_late_coll", stats.latecol),
+ IGB_STAT("tx_deferred_ok", stats.dc),
+ IGB_STAT("tx_single_coll_ok", stats.scc),
+ IGB_STAT("tx_multi_coll_ok", stats.mcc),
+ IGB_STAT("tx_timeout_count", tx_timeout_count),
+ IGB_STAT("rx_long_length_errors", stats.roc),
+ IGB_STAT("rx_short_length_errors", stats.ruc),
+ IGB_STAT("rx_align_errors", stats.algnerrc),
+ IGB_STAT("tx_tcp_seg_good", stats.tsctc),
+ IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ IGB_STAT("rx_flow_control_xon", stats.xonrxc),
+ IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ IGB_STAT("tx_flow_control_xon", stats.xontxc),
+ IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
+ IGB_STAT("rx_long_byte_count", stats.gorc),
+ IGB_STAT("tx_dma_out_of_sync", stats.doosync),
+ IGB_STAT("tx_smbus", stats.mgptc),
+ IGB_STAT("rx_smbus", stats.mgprc),
+ IGB_STAT("dropped_smbus", stats.mgpdc),
+};
+
+#define IGB_NETDEV_STAT(_net_stat) { \
+ .stat_string = __stringify(_net_stat), \
+ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+ .stat_offset = offsetof(struct net_device_stats, _net_stat) \
+}
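+/*
+ * __stringify(_net_stat) turns the member name itself into the ethtool
+ * stat string, so IGB_NETDEV_STAT(rx_errors) reports "rx_errors" with
+ * the size and offset taken from struct net_device_stats.
+ */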
+static const struct igb_stats igb_gstrings_net_stats[] = {
+ IGB_NETDEV_STAT(rx_errors),
+ IGB_NETDEV_STAT(tx_errors),
+ IGB_NETDEV_STAT(tx_dropped),
+ IGB_NETDEV_STAT(rx_length_errors),
+ IGB_NETDEV_STAT(rx_over_errors),
+ IGB_NETDEV_STAT(rx_frame_errors),
+ IGB_NETDEV_STAT(rx_fifo_errors),
+ IGB_NETDEV_STAT(tx_fifo_errors),
+ IGB_NETDEV_STAT(tx_heartbeat_errors)
};
-#define IGB_QUEUE_STATS_LEN \
- ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues + \
- ((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
- (sizeof(struct igb_queue_stats) / sizeof(u64)))
#define IGB_GLOBAL_STATS_LEN \
- sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
-#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
+ (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
+#define IGB_NETDEV_STATS_LEN \
+ (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
+#define IGB_RX_QUEUE_STATS_LEN \
+ (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
+#define IGB_TX_QUEUE_STATS_LEN \
+ (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
+#define IGB_QUEUE_STATS_LEN \
+ ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
+ IGB_RX_QUEUE_STATS_LEN) + \
+ (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
+ IGB_TX_QUEUE_STATS_LEN))
+#define IGB_STATS_LEN \
+ (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
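+/*
+ * IGB_STATS_LEN thus counts adapter stats, then netdev stats, then the
+ * per-queue counters; igb_get_ethtool_stats() fills the data array in
+ * that same order.
+ */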
+
static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Eeprom test (offline)",
"Interrupt test (offline)", "Loopback test (offline)",
"Link test (on/offline)"
};
-#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN
+#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 status;
if (hw->phy.media_type == e1000_media_type_copper) {
ecmd->transceiver = XCVR_INTERNAL;
- if (rd32(E1000_STATUS) & E1000_STATUS_LU) {
+ status = rd32(E1000_STATUS);
- adapter->hw.mac.ops.get_speed_and_duplex(hw,
- &adapter->link_speed,
- &adapter->link_duplex);
- ecmd->speed = adapter->link_speed;
+ if (status & E1000_STATUS_LU) {
- /* unfortunately FULL_DUPLEX != DUPLEX_FULL
- * and HALF_DUPLEX != DUPLEX_HALF */
+ if ((status & E1000_STATUS_SPEED_1000) ||
+ hw->phy.media_type != e1000_media_type_copper)
+ ecmd->speed = SPEED_1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ ecmd->speed = SPEED_100;
+ else
+ ecmd->speed = SPEED_10;
- if (adapter->link_duplex == FULL_DUPLEX)
+ if ((status & E1000_STATUS_FD) ||
+ hw->phy.media_type != e1000_media_type_copper)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
ecmd->speed = -1;
ecmd->duplex = -1;
}
- ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
- hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
- if (hw->phy.media_type == e1000_media_type_fiber)
- hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg;
- else
- hw->phy.autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
ecmd->advertising = hw->phy.autoneg_advertised;
- } else
+ if (adapter->fc_autoneg)
+ hw->fc.requested_mode = e1000_fc_default;
+ } else {
if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
clear_bit(__IGB_RESETTING, &adapter->state);
return -EINVAL;
}
+ }
/* reset the link */
-
if (netif_running(adapter->netdev)) {
igb_down(adapter);
igb_up(adapter);
return 0;
}
+static u32 igb_get_link(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ mac->get_link_status = 1;
+
+ return igb_has_link(adapter);
+}
+
static void igb_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
pause->autoneg =
(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
- if (hw->fc.type == e1000_fc_rx_pause)
+ if (hw->fc.current_mode == e1000_fc_rx_pause)
pause->rx_pause = 1;
- else if (hw->fc.type == e1000_fc_tx_pause)
+ else if (hw->fc.current_mode == e1000_fc_tx_pause)
pause->tx_pause = 1;
- else if (hw->fc.type == e1000_fc_full) {
+ else if (hw->fc.current_mode == e1000_fc_full) {
pause->rx_pause = 1;
pause->tx_pause = 1;
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
- if (pause->rx_pause && pause->tx_pause)
- hw->fc.type = e1000_fc_full;
- else if (pause->rx_pause && !pause->tx_pause)
- hw->fc.type = e1000_fc_rx_pause;
- else if (!pause->rx_pause && pause->tx_pause)
- hw->fc.type = e1000_fc_tx_pause;
- else if (!pause->rx_pause && !pause->tx_pause)
- hw->fc.type = e1000_fc_none;
-
- hw->fc.original_type = hw->fc.type;
-
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
igb_down(adapter);
igb_up(adapter);
- } else
+ } else {
igb_reset(adapter);
- } else
- retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
- igb_setup_link(hw) : igb_force_mac_fc(hw));
+ }
+ } else {
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_full;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_none;
+
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ retval = ((hw->phy.media_type == e1000_media_type_copper) ?
+ igb_force_mac_fc(hw) : igb_setup_link(hw));
+ }
clear_bit(__IGB_RESETTING, &adapter->state);
return retval;
static u32 igb_get_rx_csum(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- return adapter->rx_csum;
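+ /* igb_set_rx_csum() keeps every ring's flag in sync,
+ * so ring 0 is representative of the whole device */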
+ return !!(adapter->rx_ring[0]->flags & IGB_RING_FLAG_RX_CSUM);
}
static int igb_set_rx_csum(struct net_device *netdev, u32 data)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- adapter->rx_csum = data;
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (data)
+ adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
+ else
+ adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
+ }
return 0;
}
static u32 igb_get_tx_csum(struct net_device *netdev)
{
- return (netdev->features & NETIF_F_HW_CSUM) != 0;
+ return (netdev->features & NETIF_F_IP_CSUM) != 0;
}
static int igb_set_tx_csum(struct net_device *netdev, u32 data)
{
- if (data)
- netdev->features |= NETIF_F_HW_CSUM;
- else
- netdev->features &= ~NETIF_F_HW_CSUM;
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (data) {
+ netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ if (adapter->hw.mac.type >= e1000_82576)
+ netdev->features |= NETIF_F_SCTP_CSUM;
+ } else {
+ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SCTP_CSUM);
+ }
return 0;
}
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (data)
+ if (data) {
netdev->features |= NETIF_F_TSO;
- else
- netdev->features &= ~NETIF_F_TSO;
-
- if (data)
netdev->features |= NETIF_F_TSO6;
- else
+ } else {
+ netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
+ }
dev_info(&adapter->pdev->dev, "TSO is %s\n",
data ? "Enabled" : "Disabled");
regs_buff[34] = rd32(E1000_RLPML);
regs_buff[35] = rd32(E1000_RFCTL);
regs_buff[36] = rd32(E1000_MRQC);
- regs_buff[37] = rd32(E1000_VMD_CTL);
+ regs_buff[37] = rd32(E1000_VT_CTL);
/* Transmit */
regs_buff[38] = rd32(E1000_TCTL);
regs_buff[119] = adapter->stats.scvpc;
regs_buff[120] = adapter->stats.hrmpc;
- /* These should probably be added to e1000_regs.h instead */
- #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
- #define E1000_RAL(_i) (0x05400 + ((_i) * 8))
- #define E1000_RAH(_i) (0x05404 + ((_i) * 8))
- #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
- #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
- #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
- #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
- #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
- #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
-
for (i = 0; i < 4; i++)
regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
for (i = 0; i < 4; i++)
- regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i));
+ regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
for (i = 0; i < 4; i++)
regs_buff[129 + i] = rd32(E1000_RDBAL(i));
for (i = 0; i < 4; i++)
return -ENOMEM;
if (hw->nvm.type == e1000_nvm_eeprom_spi)
- ret_val = hw->nvm.ops.read_nvm(hw, first_word,
+ ret_val = hw->nvm.ops.read(hw, first_word,
last_word - first_word + 1,
eeprom_buff);
else {
for (i = 0; i < last_word - first_word + 1; i++) {
- ret_val = hw->nvm.ops.read_nvm(hw, first_word + i, 1,
+ ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
&eeprom_buff[i]);
if (ret_val)
break;
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
- ret_val = hw->nvm.ops.read_nvm(hw, first_word, 1,
+ ret_val = hw->nvm.ops.read(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
- ret_val = hw->nvm.ops.read_nvm(hw, last_word, 1,
+ ret_val = hw->nvm.ops.read(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
}
for (i = 0; i < last_word - first_word + 1; i++)
eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
- ret_val = hw->nvm.ops.write_nvm(hw, first_word,
+ ret_val = hw->nvm.ops.write(hw, first_word,
last_word - first_word + 1, eeprom_buff);
/* Update the checksum over the first part of the EEPROM if needed
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers */
- adapter->hw.nvm.ops.read_nvm(&adapter->hw, 5, 1, &eeprom_data);
+ adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
sprintf(firmware_version, "%d.%d-%d",
(eeprom_data & 0xF000) >> 12,
(eeprom_data & 0x0FF0) >> 4,
struct ethtool_ringparam *ring)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct igb_ring *tx_ring = adapter->tx_ring;
- struct igb_ring *rx_ring = adapter->rx_ring;
ring->rx_max_pending = IGB_MAX_RXD;
ring->tx_max_pending = IGB_MAX_TXD;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
struct ethtool_ringparam *ring)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct igb_buffer *old_buf;
- struct igb_buffer *old_rx_buf;
- void *old_desc;
- int i, err;
- u32 new_rx_count, new_tx_count, old_size;
- dma_addr_t old_dma;
+ struct igb_ring *temp_ring;
+ int i, err = 0;
+ u16 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD);
+ new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
+ new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
- new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
+ new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
+ new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
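+ /*
+ * Example: a request of rx_pending = 100 is first clamped to the
+ * supported range and then, assuming a descriptor multiple of 8,
+ * rounded up by ALIGN(100, 8) to 104 descriptors.
+ */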
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count)) {
/* nothing to do */
return 0;
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
- if (netif_running(adapter->netdev))
- igb_down(adapter);
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
+ if (adapter->num_tx_queues > adapter->num_rx_queues)
+ temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
+ else
+ temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
+
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ igb_down(adapter);
/*
* We can't just free everything and then setup again,
* because the ISRs in MSI-X mode get passed pointers
* to the tx and rx ring structs.
*/
- if (new_tx_count != adapter->tx_ring->count) {
+ if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- /* Save existing descriptor ring */
- old_buf = adapter->tx_ring[i].buffer_info;
- old_desc = adapter->tx_ring[i].desc;
- old_size = adapter->tx_ring[i].size;
- old_dma = adapter->tx_ring[i].dma;
- /* Try to allocate a new one */
- adapter->tx_ring[i].buffer_info = NULL;
- adapter->tx_ring[i].desc = NULL;
- adapter->tx_ring[i].count = new_tx_count;
- err = igb_setup_tx_resources(adapter,
- &adapter->tx_ring[i]);
+ memcpy(&temp_ring[i], adapter->tx_ring[i],
+ sizeof(struct igb_ring));
+
+ temp_ring[i].count = new_tx_count;
+ err = igb_setup_tx_resources(&temp_ring[i]);
if (err) {
- /* Restore the old one so at least
- the adapter still works, even if
- we failed the request */
- adapter->tx_ring[i].buffer_info = old_buf;
- adapter->tx_ring[i].desc = old_desc;
- adapter->tx_ring[i].size = old_size;
- adapter->tx_ring[i].dma = old_dma;
+ while (i) {
+ i--;
+ igb_free_tx_resources(&temp_ring[i]);
+ }
goto err_setup;
}
- /* Free the old buffer manually */
- vfree(old_buf);
- pci_free_consistent(adapter->pdev, old_size,
- old_desc, old_dma);
}
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ igb_free_tx_resources(adapter->tx_ring[i]);
+
+ memcpy(adapter->tx_ring[i], &temp_ring[i],
+ sizeof(struct igb_ring));
+ }
+
+ adapter->tx_ring_count = new_tx_count;
}
- if (new_rx_count != adapter->rx_ring->count) {
+ if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->rx_ring[i],
+ sizeof(struct igb_ring));
- old_rx_buf = adapter->rx_ring[i].buffer_info;
- old_desc = adapter->rx_ring[i].desc;
- old_size = adapter->rx_ring[i].size;
- old_dma = adapter->rx_ring[i].dma;
-
- adapter->rx_ring[i].buffer_info = NULL;
- adapter->rx_ring[i].desc = NULL;
- adapter->rx_ring[i].dma = 0;
- adapter->rx_ring[i].count = new_rx_count;
- err = igb_setup_rx_resources(adapter,
- &adapter->rx_ring[i]);
+ temp_ring[i].count = new_rx_count;
+ err = igb_setup_rx_resources(&temp_ring[i]);
if (err) {
- adapter->rx_ring[i].buffer_info = old_rx_buf;
- adapter->rx_ring[i].desc = old_desc;
- adapter->rx_ring[i].size = old_size;
- adapter->rx_ring[i].dma = old_dma;
+ while (i) {
+ i--;
+ igb_free_rx_resources(&temp_ring[i]);
+ }
goto err_setup;
}
- vfree(old_rx_buf);
- pci_free_consistent(adapter->pdev, old_size, old_desc,
- old_dma);
}
- }
- err = 0;
-err_setup:
- if (netif_running(adapter->netdev))
- igb_up(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igb_free_rx_resources(adapter->rx_ring[i]);
+
+ memcpy(adapter->rx_ring[i], &temp_ring[i],
+ sizeof(struct igb_ring));
+ }
+ adapter->rx_ring_count = new_rx_count;
+ }
+err_setup:
+ igb_up(adapter);
+ vfree(temp_ring);
+clear_reset:
clear_bit(__IGB_RESETTING, &adapter->state);
return err;
}
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
+/* i350 reg test */
+static struct igb_reg_test reg_test_i350[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i350, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82580 reg test */
+static struct igb_reg_test reg_test_82580[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ /* RDH is read-only for 82580, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
/* 82576 reg test */
static struct igb_reg_test reg_test_82576[] = {
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- /* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 1, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ /* Enable all RX queues before testing. */
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82576, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
+ struct e1000_hw *hw = &adapter->hw;
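+ /* the rd32()/wr32() macros expect a local 'hw' in scope, hence this alias */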
u32 pat, val;
- u32 _test[] =
+ static const u32 _test[] =
{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
- writel((_test[pat] & write), (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+ wr32(reg, (_test[pat] & write));
+ val = rd32(reg);
if (val != (_test[pat] & write & mask)) {
dev_err(&adapter->pdev->dev, "pattern test reg %04X "
"failed: got 0x%08X expected 0x%08X\n",
return 1;
}
}
+
return 0;
}
static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
+ struct e1000_hw *hw = &adapter->hw;
u32 val;
- writel((write & mask), (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+ wr32(reg, write & mask);
+ val = rd32(reg);
if ((write & mask) != (val & mask)) {
dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
" got 0x%08X expected 0x%08X\n", reg,
*data = reg;
return 1;
}
+
return 0;
}
u32 value, before, after;
u32 i, toggle;
- toggle = 0x7FFFF3FF;
-
switch (adapter->hw.mac.type) {
+ case e1000_i350:
+ test = reg_test_i350;
+ toggle = 0x7FEFF3FF;
+ break;
+ case e1000_82580:
+ test = reg_test_82580;
+ toggle = 0x7FEFF3FF;
+ break;
case e1000_82576:
test = reg_test_82576;
+ toggle = 0x7FFFF3FF;
break;
default:
test = reg_test_82575;
+ toggle = 0x7FFFF3FF;
break;
}
for (i = 0; i < test->array_len; i++) {
switch (test->test_type) {
case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * test->reg_offset),
+ REG_PATTERN_TEST(test->reg +
+ (i * test->reg_offset),
test->mask,
test->write);
break;
case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * test->reg_offset),
+ REG_SET_AND_CHECK(test->reg +
+ (i * test->reg_offset),
test->mask,
test->write);
break;
*data = 0;
/* Read and add up the contents of the EEPROM */
for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
- if ((adapter->hw.nvm.ops.read_nvm(&adapter->hw, i, 1, &temp))
- < 0) {
+ if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
*data = 1;
break;
}
static irqreturn_t igb_test_intr(int irq, void *data)
{
- struct net_device *netdev = (struct net_device *) data;
- struct igb_adapter *adapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = (struct igb_adapter *) data;
struct e1000_hw *hw = &adapter->hw;
adapter->test_icr |= rd32(E1000_ICR);
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- u32 mask, i = 0, shared_int = true;
+ u32 mask, ics_mask, i = 0, shared_int = true;
u32 irq = adapter->pdev->irq;
*data = 0;
/* Hook up test interrupt handler just for this test */
if (adapter->msix_entries) {
- /* NOTE: we don't test MSI-X interrupts here, yet */
- return 0;
+ if (request_irq(adapter->msix_entries[0].vector,
+ igb_test_intr, 0, netdev->name, adapter)) {
+ *data = 1;
+ return -1;
+ }
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
- if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
+ if (request_irq(irq,
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
- } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
- netdev->name, netdev)) {
+ } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
+ netdev->name, adapter)) {
shared_int = false;
- } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ } else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
+ netdev->name, adapter)) {
*data = 1;
return -1;
}
(shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
- wr32(E1000_IMC, 0xFFFFFFFF);
+ wr32(E1000_IMC, ~0);
msleep(10);
+ /* Define all writable bits for ICS */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ ics_mask = 0x37F47EDD;
+ break;
+ case e1000_82576:
+ ics_mask = 0x77D4FBFD;
+ break;
+ case e1000_82580:
+ ics_mask = 0x77DCFED5;
+ break;
+ case e1000_i350:
+ ics_mask = 0x77DCFED5;
+ break;
+ default:
+ ics_mask = 0x7FFFFFFF;
+ break;
+ }
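+ /*
+ * Example: on the 82575, ics_mask = 0x37F47EDD has bit 1 clear, so the
+ * per-bit loop below simply skips causes that cannot be raised via ICS.
+ */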
+
/* Test each interrupt */
- for (; i < 10; i++) {
+ for (; i < 31; i++) {
/* Interrupt to test */
mask = 1 << i;
+ if (!(mask & ics_mask))
+ continue;
+
if (!shared_int) {
/* Disable the interrupt to be reported in
* the cause register and then force the same
* test failed.
*/
adapter->test_icr = 0;
- wr32(E1000_IMC, ~mask & 0x00007FFF);
- wr32(E1000_ICS, ~mask & 0x00007FFF);
+
+ /* Flush any pending interrupts */
+ wr32(E1000_ICR, ~0);
+
+ wr32(E1000_IMC, mask);
+ wr32(E1000_ICS, mask);
msleep(10);
if (adapter->test_icr & mask) {
* test failed.
*/
adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+ wr32(E1000_ICR, ~0);
+
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
msleep(10);
* test failed.
*/
adapter->test_icr = 0;
- wr32(E1000_IMC, ~mask & 0x00007FFF);
- wr32(E1000_ICS, ~mask & 0x00007FFF);
+
+ /* Flush any pending interrupts */
+ wr32(E1000_ICR, ~0);
+
+ wr32(E1000_IMC, ~mask);
+ wr32(E1000_ICS, ~mask);
msleep(10);
- if (adapter->test_icr) {
+ if (adapter->test_icr & mask) {
*data = 5;
break;
}
}
/* Disable all the interrupts */
- wr32(E1000_IMC, 0xFFFFFFFF);
+ wr32(E1000_IMC, ~0);
msleep(10);
/* Unhook test interrupt handler */
- free_irq(irq, netdev);
+ if (adapter->msix_entries)
+ free_irq(adapter->msix_entries[0].vector, adapter);
+ else
+ free_irq(irq, adapter);
return *data;
}
static void igb_free_desc_rings(struct igb_adapter *adapter)
{
- struct igb_ring *tx_ring = &adapter->test_tx_ring;
- struct igb_ring *rx_ring = &adapter->test_rx_ring;
- struct pci_dev *pdev = adapter->pdev;
- int i;
-
- if (tx_ring->desc && tx_ring->buffer_info) {
- for (i = 0; i < tx_ring->count; i++) {
- struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
- if (buf->dma)
- pci_unmap_single(pdev, buf->dma, buf->length,
- PCI_DMA_TODEVICE);
- if (buf->skb)
- dev_kfree_skb(buf->skb);
- }
- }
-
- if (rx_ring->desc && rx_ring->buffer_info) {
- for (i = 0; i < rx_ring->count; i++) {
- struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
- if (buf->dma)
- pci_unmap_single(pdev, buf->dma,
- IGB_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
- if (buf->skb)
- dev_kfree_skb(buf->skb);
- }
- }
-
- if (tx_ring->desc) {
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
- tx_ring->desc = NULL;
- }
- if (rx_ring->desc) {
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
- rx_ring->desc = NULL;
- }
-
- kfree(tx_ring->buffer_info);
- tx_ring->buffer_info = NULL;
- kfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
-
- return;
+ igb_free_tx_resources(&adapter->test_tx_ring);
+ igb_free_rx_resources(&adapter->test_rx_ring);
}
static int igb_setup_desc_rings(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
struct igb_ring *tx_ring = &adapter->test_tx_ring;
struct igb_ring *rx_ring = &adapter->test_rx_ring;
- struct pci_dev *pdev = adapter->pdev;
- u32 rctl;
- int i, ret_val;
+ struct e1000_hw *hw = &adapter->hw;
+ int ret_val;
/* Setup Tx descriptor ring and Tx buffers */
+ tx_ring->count = IGB_DEFAULT_TXD;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->netdev = adapter->netdev;
+ tx_ring->reg_idx = adapter->vfs_allocated_count;
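+ /* with SR-IOV the PF's queues follow the VF queues, so the test
+ * ring is indexed past vfs_allocated_count (0 when no VFs exist) */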
- if (!tx_ring->count)
- tx_ring->count = IGB_DEFAULT_TXD;
-
- tx_ring->buffer_info = kcalloc(tx_ring->count,
- sizeof(struct igb_buffer),
- GFP_KERNEL);
- if (!tx_ring->buffer_info) {
+ if (igb_setup_tx_resources(tx_ring)) {
ret_val = 1;
goto err_nomem;
}
- tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
- tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
- if (!tx_ring->desc) {
- ret_val = 2;
- goto err_nomem;
- }
- tx_ring->next_to_use = tx_ring->next_to_clean = 0;
-
- wr32(E1000_TDBAL(0),
- ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
- wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
- wr32(E1000_TDLEN(0),
- tx_ring->count * sizeof(struct e1000_tx_desc));
- wr32(E1000_TDH(0), 0);
- wr32(E1000_TDT(0), 0);
- wr32(E1000_TCTL,
- E1000_TCTL_PSP | E1000_TCTL_EN |
- E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
- E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
-
- for (i = 0; i < tx_ring->count; i++) {
- struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
- struct sk_buff *skb;
- unsigned int size = 1024;
-
- skb = alloc_skb(size, GFP_KERNEL);
- if (!skb) {
- ret_val = 3;
- goto err_nomem;
- }
- skb_put(skb, size);
- tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].length = skb->len;
- tx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
- tx_desc->lower.data = cpu_to_le32(skb->len);
- tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
- E1000_TXD_CMD_IFCS |
- E1000_TXD_CMD_RS);
- tx_desc->upper.data = 0;
- }
+ igb_setup_tctl(adapter);
+ igb_configure_tx_ring(adapter, tx_ring);
/* Setup Rx descriptor ring and Rx buffers */
-
- if (!rx_ring->count)
- rx_ring->count = IGB_DEFAULT_RXD;
-
- rx_ring->buffer_info = kcalloc(rx_ring->count,
- sizeof(struct igb_buffer),
- GFP_KERNEL);
- if (!rx_ring->buffer_info) {
- ret_val = 4;
+ rx_ring->count = IGB_DEFAULT_RXD;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->netdev = adapter->netdev;
+ rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
+ rx_ring->reg_idx = adapter->vfs_allocated_count;
+
+ if (igb_setup_rx_resources(rx_ring)) {
+ ret_val = 3;
goto err_nomem;
}
- rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
- if (!rx_ring->desc) {
- ret_val = 5;
- goto err_nomem;
- }
- rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+ /* set the default queue to queue 0 of the PF */
+ wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
- rctl = rd32(E1000_RCTL);
- wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
- wr32(E1000_RDBAL(0),
- ((u64) rx_ring->dma & 0xFFFFFFFF));
- wr32(E1000_RDBAH(0),
- ((u64) rx_ring->dma >> 32));
- wr32(E1000_RDLEN(0), rx_ring->size);
- wr32(E1000_RDH(0), 0);
- wr32(E1000_RDT(0), 0);
- rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
- wr32(E1000_RCTL, rctl);
- wr32(E1000_SRRCTL(0), 0);
+ /* enable receive ring */
+ igb_setup_rctl(adapter);
+ igb_configure_rx_ring(adapter, rx_ring);
- for (i = 0; i < rx_ring->count; i++) {
- struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
- struct sk_buff *skb;
-
- skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
- GFP_KERNEL);
- if (!skb) {
- ret_val = 6;
- goto err_nomem;
- }
- skb_reserve(skb, NET_IP_ALIGN);
- rx_ring->buffer_info[i].skb = skb;
- rx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, IGB_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
- rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);
- memset(skb->data, 0x00, skb->len);
- }
+ igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
return 0;
struct e1000_hw *hw = &adapter->hw;
/* Write out to PHY registers 29 and 30 to disable the Receiver. */
- hw->phy.ops.write_phy_reg(hw, 29, 0x001F);
- hw->phy.ops.write_phy_reg(hw, 30, 0x8FFC);
- hw->phy.ops.write_phy_reg(hw, 29, 0x001A);
- hw->phy.ops.write_phy_reg(hw, 30, 0x8FF0);
+ igb_write_phy_reg(hw, 29, 0x001F);
+ igb_write_phy_reg(hw, 30, 0x8FFC);
+ igb_write_phy_reg(hw, 29, 0x001A);
+ igb_write_phy_reg(hw, 30, 0x8FF0);
}
static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
- u32 stat_reg = 0;
hw->mac.autoneg = false;
if (hw->phy.type == e1000_phy_m88) {
/* Auto-MDI/MDIX Off */
- hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
/* reset to update Auto-MDI/MDIX */
- hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x9140);
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
/* autoneg off */
- hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x8140);
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+ } else if (hw->phy.type == e1000_phy_82580) {
+ /* enable MII loopback */
+ igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
}
ctrl_reg = rd32(E1000_CTRL);
/* force 1000, set loopback */
- hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x4140);
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = rd32(E1000_CTRL);
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
+ E1000_CTRL_FD | /* Force Duplex to FULL */
+ E1000_CTRL_SLU); /* Set link up enable bit */
- if (hw->phy.media_type == e1000_media_type_copper &&
- hw->phy.type == e1000_phy_m88)
+ if (hw->phy.type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
- else {
- /* Set the ILOS bit on the fiber Nic if half duplex link is
- * detected. */
- stat_reg = rd32(E1000_STATUS);
- if ((stat_reg & E1000_STATUS_FD) == 0)
- ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
- }
wr32(E1000_CTRL, ctrl_reg);
struct e1000_hw *hw = &adapter->hw;
u32 reg;
- if (hw->phy.media_type == e1000_media_type_fiber ||
- hw->phy.media_type == e1000_media_type_internal_serdes) {
+ reg = rd32(E1000_CTRL_EXT);
+
+ /* use CTRL_EXT to identify link type as SGMII can appear as copper */
+ if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
reg = rd32(E1000_RCTL);
reg |= E1000_RCTL_LBM_TCVR;
wr32(E1000_RCTL, reg);
E1000_CTRL_TFCE |
E1000_CTRL_LRST);
reg |= E1000_CTRL_SLU |
- E1000_CTRL_FD;
+ E1000_CTRL_FD;
wr32(E1000_CTRL, reg);
/* Unset switch control to serdes energy detect */
wr32(E1000_PCS_LCTL, reg);
return 0;
- } else if (hw->phy.media_type == e1000_media_type_copper) {
- return igb_set_phy_loopback(adapter);
}
- return 7;
+ return igb_set_phy_loopback(adapter);
}
static void igb_loopback_cleanup(struct igb_adapter *adapter)
wr32(E1000_RCTL, rctl);
hw->mac.autoneg = true;
- hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_reg);
+ igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
if (phy_reg & MII_CR_LOOPBACK) {
phy_reg &= ~MII_CR_LOOPBACK;
- hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_reg);
+ igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
igb_phy_sw_reset(hw);
}
}
unsigned int frame_size)
{
memset(skb->data, 0xFF, frame_size);
- frame_size &= ~1;
- memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+ frame_size /= 2;
+ memset(&skb->data[frame_size], 0xAA, frame_size - 1);
+ memset(&skb->data[frame_size + 10], 0xBE, 1);
+ memset(&skb->data[frame_size + 12], 0xAF, 1);
}
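+/*
+ * With size = 1024 the test frame is laid out as: bytes 0-511 are 0xFF,
+ * bytes 512-1022 are 0xAA except byte 522 = 0xBE and byte 524 = 0xAF,
+ * and byte 1023 stays 0xFF. igb_check_lbtest_frame() only needs to
+ * probe bytes 3, 522 and 524.
+ */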
static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
{
- frame_size &= ~1;
- if (*(skb->data + 3) == 0xFF)
- if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ frame_size /= 2;
+ if (*(skb->data + 3) == 0xFF) {
+ if ((*(skb->data + frame_size + 10) == 0xBE) &&
+ (*(skb->data + frame_size + 12) == 0xAF)) {
return 0;
+ }
+ }
return 13;
}
+static int igb_clean_test_rings(struct igb_ring *rx_ring,
+ struct igb_ring *tx_ring,
+ unsigned int size)
+{
+ union e1000_adv_rx_desc *rx_desc;
+ struct igb_buffer *buffer_info;
+ int rx_ntc, tx_ntc, count = 0;
+ u32 staterr;
+
+ /* initialize next to clean and descriptor values */
+ rx_ntc = rx_ring->next_to_clean;
+ tx_ntc = tx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ /* check rx buffer */
+ buffer_info = &rx_ring->buffer_info[rx_ntc];
+
+ /* unmap rx buffer, will be remapped by alloc_rx_buffers */
+ dma_unmap_single(rx_ring->dev,
+ buffer_info->dma,
+ rx_ring->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ /* verify contents of skb */
+ if (!igb_check_lbtest_frame(buffer_info->skb, size))
+ count++;
+
+ /* unmap buffer on tx side */
+ buffer_info = &tx_ring->buffer_info[tx_ntc];
+ igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+
+ /* increment rx/tx next to clean counters */
+ rx_ntc++;
+ if (rx_ntc == rx_ring->count)
+ rx_ntc = 0;
+ tx_ntc++;
+ if (tx_ntc == tx_ring->count)
+ tx_ntc = 0;
+
+ /* fetch next descriptor */
+ rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ /* re-map buffers to ring, store next to clean values */
+ igb_alloc_rx_buffers_adv(rx_ring, count);
+ rx_ring->next_to_clean = rx_ntc;
+ tx_ring->next_to_clean = tx_ntc;
+
+ return count;
+}
+
static int igb_run_loopback_test(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
struct igb_ring *tx_ring = &adapter->test_tx_ring;
struct igb_ring *rx_ring = &adapter->test_rx_ring;
- struct pci_dev *pdev = adapter->pdev;
- int i, j, k, l, lc, good_cnt;
- int ret_val = 0;
- unsigned long time;
+ int i, j, lc, good_cnt, ret_val = 0;
+ unsigned int size = 1024;
+ netdev_tx_t tx_ret_val;
+ struct sk_buff *skb;
- wr32(E1000_RDT(0), rx_ring->count - 1);
+ /* allocate test skb */
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return 11;
- /* Calculate the loop count based on the largest descriptor ring
+ /* place data into test skb */
+ igb_create_lbtest_frame(skb, size);
+ skb_put(skb, size);
+
+ /*
+ * Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop
*/
else
lc = ((rx_ring->count / 64) * 2) + 1;
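+ /* e.g. 256-entry test rings give lc = (256 / 64) * 2 + 1 = 9 passes */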
- k = l = 0;
for (j = 0; j <= lc; j++) { /* loop count loop */
- for (i = 0; i < 64; i++) { /* send the packets */
- igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
- 1024);
- pci_dma_sync_single_for_device(pdev,
- tx_ring->buffer_info[k].dma,
- tx_ring->buffer_info[k].length,
- PCI_DMA_TODEVICE);
- k++;
- if (k == tx_ring->count)
- k = 0;
- }
- wr32(E1000_TDT(0), k);
- msleep(200);
- time = jiffies; /* set the start time for the receive */
+ /* reset count of good packets */
good_cnt = 0;
- do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
- rx_ring->buffer_info[l].dma,
- IGB_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
-
- ret_val = igb_check_lbtest_frame(
- rx_ring->buffer_info[l].skb, 1024);
- if (!ret_val)
+
+ /* place 64 packets on the transmit queue */
+ for (i = 0; i < 64; i++) {
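+ /* take an extra reference so the free after transmit leaves 'skb' reusable */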
+ skb_get(skb);
+ tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
+ if (tx_ret_val == NETDEV_TX_OK)
good_cnt++;
- l++;
- if (l == rx_ring->count)
- l = 0;
- /* time + 20 msecs (200 msecs on 2.4) is more than
- * enough time to complete the receives, if it's
- * exceeded, break and error off
- */
- } while (good_cnt < 64 && jiffies < (time + 20));
+ }
+
if (good_cnt != 64) {
- ret_val = 13; /* ret_val is the same as mis-compare */
+ ret_val = 12;
break;
}
- if (jiffies >= (time + 20)) {
- ret_val = 14; /* error code for time out error */
+
+ /* allow 200 milliseconds for packets to go from tx to rx */
+ msleep(200);
+
+ good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
+ if (good_cnt != 64) {
+ ret_val = 13;
break;
}
} /* end loop count loop */
+
+ /* free the original skb */
+ kfree_skb(skb);
+
return ret_val;
}
if (hw->mac.autoneg)
msleep(4000);
- if (!(rd32(E1000_STATUS) &
- E1000_STATUS_LU))
+ if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
*data = 1;
}
return *data;
dev_info(&adapter->pdev->dev, "offline testing starting\n");
+ /* power up link for link test */
+ igb_power_up_link(adapter);
+
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
if (igb_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
igb_reset(adapter);
+ /* power up link for loopback test */
+ igb_power_up_link(adapter);
if (igb_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
dev_open(netdev);
} else {
dev_info(&adapter->pdev->dev, "online testing starting\n");
- /* Online tests */
- if (igb_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* PHY is powered down when interface is down */
+ if (!netif_carrier_ok(netdev)) {
+ data[4] = 0;
+ } else {
+ if (igb_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
/* Online tests aren't run; pass by default */
data[0] = 0;
/* return success for non excluded adapter ports */
retval = 0;
break;
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ /* quad port adapters only support WoL on port A */
+ if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B,
* so exclude FUNC_1 ports from having WoL enabled */
- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+ if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
!adapter->eeprom_wol) {
wol->supported = 0;
break;
struct igb_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
-
- return;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
}
static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
if (igb_wol_exclusion(adapter, wol) ||
!device_can_wakeup(&adapter->pdev->dev))
return wol->wolopts ? -EOPNOTSUPP : 0;
- switch (hw->device_id) {
- default:
- break;
- }
-
/* these settings will always override what we currently have */
adapter->wol = 0;
adapter->wol |= E1000_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
-
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
return 0;
}
-/* toggle LED 4 times per second = 2 "blinks" per second */
-#define IGB_ID_INTERVAL (HZ/4)
-
/* bit defines for adapter->led_status */
#define IGB_LED_ON 0
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long timeout;
- if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
- data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+ timeout = data * 1000;
+
+ /*
+ * msleep_interruptible only accepts an unsigned int, so the duration
+ * we can wait is limited
+ */
+ if (!timeout || timeout > UINT_MAX)
+ timeout = UINT_MAX;
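+ /* e.g. data = 2 blinks for 2000 ms, while data = 0
+ * effectively blinks until the caller is interrupted */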
igb_blink_led(hw);
- msleep_interruptible(data * 1000);
+ msleep_interruptible(timeout);
igb_led_off(hw);
clear_bit(IGB_LED_ON, &adapter->led_status);
struct ethtool_coalesce *ec)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
int i;
if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
+ if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+ ((ec->tx_coalesce_usecs > 3) &&
+ (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
+ (ec->tx_coalesce_usecs == 2))
+ return -EINVAL;
+
+ if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+ return -EINVAL;
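+ /*
+ * Note: coalesce values 1 and 3 act as dynamic-ITR mode selectors
+ * rather than microsecond intervals (they are replaced below by
+ * IGB_START_ITR), 2 is rejected outright, and with queue pairs a
+ * tx and rx ring share one vector so a separate tx value is refused.
+ */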
+
+ /* keep 1-3 as mode selectors; otherwise convert usecs to ITR register units */
- if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
- adapter->itr_setting = ec->rx_coalesce_usecs;
- adapter->itr = IGB_START_ITR;
- } else {
- adapter->itr_setting = ec->rx_coalesce_usecs << 2;
- adapter->itr = adapter->itr_setting;
- }
+ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
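+ /* e.g. rx_coalesce_usecs = 100 is stored as 400; igb_get_coalesce() undoes the shift */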
- for (i = 0; i < adapter->num_rx_queues; i++)
- wr32(adapter->rx_ring[i].itr_register, adapter->itr);
+ /* same conversion for tx; queue pairs force tx to mirror the rx setting */
+ if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+ else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ if (q_vector->rx_ring)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ else
+ q_vector->itr_val = adapter->tx_itr_setting;
+ if (q_vector->itr_val && q_vector->itr_val <= 3)
+ q_vector->itr_val = IGB_START_ITR;
+ q_vector->set_itr = 1;
+ }
return 0;
}
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (adapter->itr_setting <= 3)
- ec->rx_coalesce_usecs = adapter->itr_setting;
+ if (adapter->rx_itr_setting <= 3)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
else
- ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
+ if (adapter->tx_itr_setting <= 3)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+ }
return 0;
}
-
static int igb_nway_reset(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct ethtool_stats *stats, u64 *data)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ struct net_device_stats *net_stats = &netdev->stats;
u64 *queue_stat;
- int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
- int j;
- int i;
-#ifdef CONFIG_IGB_LRO
- int aggregated = 0, flushed = 0, no_desc = 0;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
- flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
- no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
- }
- adapter->lro_aggregated = aggregated;
- adapter->lro_flushed = flushed;
- adapter->lro_no_desc = no_desc;
-#endif
+ int i, j, k;
+ char *p;
igb_update_stats(adapter);
+
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
- char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset;
+ p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
data[i] = (igb_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
+ p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
+ data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
for (j = 0; j < adapter->num_tx_queues; j++) {
- int k;
- queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
- for (k = 0; k < stat_count; k++)
- data[i + k] = queue_stat[k];
- i += k;
+ queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
+ for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
+ data[i] = queue_stat[k];
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- int k;
- queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
- for (k = 0; k < stat_count; k++)
- data[i + k] = queue_stat[k];
- i += k;
+ queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
+ for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
+ data[i] = queue_stat[k];
}
}
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
+ memcpy(p, igb_gstrings_net_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
for (i = 0; i < adapter->num_tx_queues; i++) {
sprintf(p, "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_restart", i);
+ p += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_drops", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_csum_err", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_alloc_failed", i);
+ p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
-static struct ethtool_ops igb_ethtool_ops = {
+static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
.get_drvinfo = igb_get_drvinfo,
.get_msglevel = igb_get_msglevel,
.set_msglevel = igb_set_msglevel,
.nway_reset = igb_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = igb_get_link,
.get_eeprom_len = igb_get_eeprom_len,
.get_eeprom = igb_get_eeprom,
.set_eeprom = igb_set_eeprom,