char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.20-k3-NAPI"
+#define DRV_VERSION "7.3.21-k3-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
+ struct e1000_rx_ring *rx_ring,
int cleaned_count);
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
#ifdef CONFIG_PM
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);
e1000_irq_enable(adapter);
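+ /* start the transmit queue; the link watchdog only toggles the carrier */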
+ netif_wake_queue(adapter->netdev);
+
/* fire a link change interrupt to start the watchdog */
ew32(ICS, E1000_ICS_LSC);
return 0;
void e1000_down(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
+ u32 rctl, tctl;
/* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */
set_bit(__E1000_DOWN, &adapter->flags);
+ /* disable receives in the hardware */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ /* flush and sleep below */
+
+ /* can be netif_tx_disable when NETIF_F_LLTX is removed */
+ netif_stop_queue(netdev);
+
+ /* disable transmits in the hardware */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+ /* flush both disables and wait for them to finish */
+ E1000_WRITE_FLUSH();
+ msleep(10);
+
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
adapter->link_speed = 0;
adapter->link_duplex = 0;
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
e1000_reset(adapter);
e1000_clean_all_tx_rings(adapter);
{
struct e1000_hw *hw = &adapter->hw;
u32 pba = 0, tx_space, min_tx_space, min_rx_space;
- u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
bool legacy_pba_adjust = false;
+ u16 hwm;
/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
}
if (legacy_pba_adjust) {
- if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
+ if (hw->max_frame_size > E1000_RXBUFFER_8192)
pba -= 8; /* allocate more FIFO for Tx */
if (hw->mac_type == e1000_82547) {
(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
atomic_set(&adapter->tx_fifo_stall, 0);
}
- } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
/* adjust PBA for jumbo frames */
ew32(PBA, pba);
/* To maintain wire speed transmits, the Tx FIFO should be
- * large enough to accomodate two full transmit packets,
+ * large enough to accommodate two full transmit packets,
* rounded up to the next 1KB and expressed in KB. Likewise,
- * the Rx FIFO should be large enough to accomodate at least
+ * the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
* expressed in KB. */
pba = er32(PBA);
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /* don't include ethernet FCS because hardware appends/strips */
- min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
- VLAN_TAG_SIZE;
- min_tx_space = min_rx_space;
- min_tx_space *= 2;
+ /*
+ * the Tx FIFO also stores 16 bytes of information about each
+ * packet, but don't include the Ethernet FCS because hardware
+ * appends it
+ */
+ min_tx_space = (hw->max_frame_size +
+ sizeof(struct e1000_tx_desc) -
+ ETH_FCS_LEN) * 2;
min_tx_space = ALIGN(min_tx_space, 1024);
min_tx_space >>= 10;
+ /* software strips receive CRC, so leave room for it */
+ min_rx_space = hw->max_frame_size;
min_rx_space = ALIGN(min_rx_space, 1024);
min_rx_space >>= 10;
ew32(PBA, pba);
- /* flow control settings */
- /* Set the FC high water mark to 90% of the FIFO size.
- * Required to clear last 3 LSB */
- fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
- /* We can't use 90% on small FIFOs because the remainder
- * would be less than 1 full frame. In this case, we size
- * it to allow at least a full frame above the high water
- * mark. */
- if (pba < E1000_PBA_16K)
- fc_high_water_mark = (pba * 1024) - 1600;
-
- hw->fc_high_water = fc_high_water_mark;
- hw->fc_low_water = fc_high_water_mark - 8;
- if (hw->mac_type == e1000_80003es2lan)
- hw->fc_pause_time = 0xFFFF;
- else
- hw->fc_pause_time = E1000_FC_PAUSE_TIME;
+ /*
+ * flow control settings:
+ * The high water mark must be low enough to fit one full frame
+ * (or the size used for early receive) above it in the Rx FIFO.
+ * Set it to the lower of:
+ * - 90% of the Rx FIFO size, and
+ * - the full Rx FIFO size minus the early receive size (for parts
+ * with ERT support assuming ERT set to E1000_ERT_2048), or
+ * - the full Rx FIFO size minus one full frame
+ */
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - hw->max_frame_size));
+
+ hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
+ hw->fc_low_water = hw->fc_high_water - 8;
+ hw->fc_pause_time = E1000_FC_PAUSE_TIME;
hw->fc_send_xon = 1;
hw->fc = hw->original_fc;
}
}
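+/* net_device callbacks, installed as netdev->netdev_ops in e1000_probe() */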
+static const struct net_device_ops e1000_netdev_ops = {
+ .ndo_open = e1000_open,
+ .ndo_stop = e1000_close,
+ .ndo_start_xmit = e1000_xmit_frame,
+ .ndo_get_stats = e1000_get_stats,
+ .ndo_set_rx_mode = e1000_set_rx_mode,
+ .ndo_set_mac_address = e1000_set_mac,
+ .ndo_tx_timeout = e1000_tx_timeout,
+ .ndo_change_mtu = e1000_change_mtu,
+ .ndo_do_ioctl = e1000_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+
+ .ndo_vlan_rx_register = e1000_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = e1000_netpoll,
+#endif
+};
+
/**
* e1000_probe - Device Initialization Routine
* @pdev: PCI device information struct
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
- DECLARE_MAC_BUF(mac);
/* do not allocate ioport bars when not needed */
need_ioport = e1000_is_need_ioport(pdev);
err = pci_enable_device(pdev);
} else {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
- err = pci_enable_device(pdev);
+ err = pci_enable_device_mem(pdev);
}
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
- !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
E1000_ERR("No usable DMA configuration, "
"aborting\n");
hw->back = adapter;
err = -EIO;
- hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
- pci_resource_len(pdev, BAR_0));
+ hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
if (!hw->hw_addr)
goto err_ioremap;
}
}
- netdev->open = &e1000_open;
- netdev->stop = &e1000_close;
- netdev->hard_start_xmit = &e1000_xmit_frame;
- netdev->get_stats = &e1000_get_stats;
- netdev->set_rx_mode = &e1000_set_rx_mode;
- netdev->set_mac_address = &e1000_set_mac;
- netdev->change_mtu = &e1000_change_mtu;
- netdev->do_ioctl = &e1000_ioctl;
+ netdev->netdev_ops = &e1000_netdev_ops;
e1000_set_ethtool_ops(netdev);
- netdev->tx_timeout = &e1000_tx_timeout;
netdev->watchdog_timeo = 5 * HZ;
netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
- netdev->vlan_rx_register = e1000_vlan_rx_register;
- netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
- netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- netdev->poll_controller = e1000_netpoll;
-#endif
+
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
adapter->bd_number = cards_found;
* because it depends on mac_type */
if ((hw->mac_type == e1000_ich8lan) &&
(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
- hw->flash_address =
- ioremap(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1));
+ hw->flash_address = pci_ioremap_bar(pdev, 1);
if (!hw->flash_address)
goto err_flashmap;
}
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->features |= NETIF_F_LLTX;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_HW_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
/* initialize the wol settings based on the eeprom settings */
adapter->wol = adapter->eeprom_wol;
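+ /* keep the PM core's wakeup flag in sync with the WoL setting */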
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* print bus type/speed/width info */
DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
(hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
"32-bit"));
- printk("%s\n", print_mac(mac, netdev->dev_addr));
+ printk("%pM\n", netdev->dev_addr);
if (hw->bus_type == e1000_bus_type_pci_express) {
DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
!e1000_check_mng_mode(hw))
e1000_get_hw_control(adapter);
- /* tell the stack to leave us alone until e1000_open() is called */
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
goto err_register;
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
cards_found++;
if (hw->flash_address)
iounmap(hw->flash_address);
err_flashmap:
- for (i = 0; i < adapter->num_rx_queues; i++)
- dev_put(&adapter->polling_netdev[i]);
-
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
- kfree(adapter->polling_netdev);
err_sw_init:
iounmap(hw->hw_addr);
err_ioremap:
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- int i;
cancel_work_sync(&adapter->reset_task);
* would have already happened in close and is redundant. */
e1000_release_hw_control(adapter);
- for (i = 0; i < adapter->num_rx_queues; i++)
- dev_put(&adapter->polling_netdev[i]);
-
unregister_netdev(netdev);
if (!e1000_check_phy_reset_block(hw))
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
- kfree(adapter->polling_netdev);
iounmap(hw->hw_addr);
if (hw->flash_address)
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- int i;
/* PCI config space info */
return -ENOMEM;
}
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->polling_netdev[i].priv = adapter;
- dev_hold(&adapter->polling_netdev[i]);
- set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
- }
- spin_lock_init(&adapter->tx_queue_lock);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
* @adapter: board private structure to initialize
*
* We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time. The polling_netdev array is
- * intended for Multiqueue, but should work fine with a single queue.
+ * number of queues at compile-time.
**/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
return -ENOMEM;
}
- adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
- sizeof(struct net_device),
- GFP_KERNEL);
- if (!adapter->polling_netdev) {
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- return -ENOMEM;
- }
-
return E1000_SUCCESS;
}
if (test_bit(__E1000_TESTING, &adapter->flags))
return -EBUSY;
+ netif_carrier_off(netdev);
+
/* allocate transmit descriptors */
err = e1000_setup_all_tx_resources(adapter);
if (err)
txdr->next_to_use = 0;
txdr->next_to_clean = 0;
- spin_lock_init(&txdr->tx_lock);
return 0;
}
rxdr->next_to_clean = 0;
rxdr->next_to_use = 0;
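+ /* rx_skb_top holds a receive that is chained across several descriptors */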
+ rxdr->rx_skb_top = NULL;
return 0;
}
* e1000_setup_rctl - configure the receive control registers
* @adapter: Board private structure
**/
-#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
- (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_hw *hw = &adapter->hw;
u32 rdlen, rctl, rxcsum, ctrl_ext;
- rdlen = adapter->rx_ring[0].count *
- sizeof(struct e1000_rx_desc);
- adapter->clean_rx = e1000_clean_rx_irq;
- adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ rdlen = adapter->rx_ring[0].count *
+ sizeof(struct e1000_rx_desc);
+ adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
+ } else {
+ rdlen = adapter->rx_ring[0].count *
+ sizeof(struct e1000_rx_desc);
+ adapter->clean_rx = e1000_clean_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+ }
/* disable receives while setting up the descriptors */
rctl = er32(RCTL);
static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
- if (buffer_info->dma) {
- pci_unmap_page(adapter->pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
- buffer_info->dma = 0;
- }
+ buffer_info->dma = 0;
if (buffer_info->skb) {
+ skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
+ buffer_info->time_stamp = 0;
/* buffer_info must be completely set up in the transmit path */
}
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->skb) {
- pci_unmap_single(pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ if (buffer_info->dma &&
+ adapter->clean_rx == e1000_clean_rx_irq) {
+ pci_unmap_single(pdev, buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+ } else if (buffer_info->dma &&
+ adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
+ pci_unmap_page(pdev, buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+ }
+ buffer_info->dma = 0;
+ if (buffer_info->page) {
+ put_page(buffer_info->page);
+ buffer_info->page = NULL;
+ }
+ if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
}
+ /* there may also be some cached data from a chained receive */
+ if (rx_ring->rx_skb_top) {
+ dev_kfree_skb(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ }
+
size = sizeof(struct e1000_buffer) * rx_ring->count;
memset(rx_ring->buffer_info, 0, size);
/* Zero out the descriptor ring */
-
memset(rx_ring->desc, 0, rx_ring->size);
rx_ring->next_to_clean = 0;
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_addr_list *uc_ptr;
+ struct netdev_hw_addr *ha;
+ bool use_uc = false;
struct dev_addr_list *mc_ptr;
u32 rctl;
u32 hash_value;
int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
E1000_NUM_MTA_REGISTERS_ICH8LAN :
E1000_NUM_MTA_REGISTERS;
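+ /* build the multicast table in memory first, then write it out in a
+ * single pass; GFP_ATOMIC because set_rx_mode can run in atomic context */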
+ u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
+
+ if (!mcarray) {
+ DPRINTK(PROBE, ERR, "memory allocation failed\n");
+ return;
+ }
if (hw->mac_type == e1000_ich8lan)
rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
rctl |= E1000_RCTL_VFE;
}
- uc_ptr = NULL;
- if (netdev->uc_count > rar_entries - 1) {
+ if (netdev->uc.count > rar_entries - 1) {
rctl |= E1000_RCTL_UPE;
} else if (!(netdev->flags & IFF_PROMISC)) {
rctl &= ~E1000_RCTL_UPE;
- uc_ptr = netdev->uc_list;
+ use_uc = true;
}
ew32(RCTL, rctl);
* if there are not 14 addresses, go ahead and clear the filters
* -- with 82571 controllers only 0-13 entries are filled here
*/
+ i = 1;
+ if (use_uc)
+ list_for_each_entry(ha, &netdev->uc.list, list) {
+ if (i == rar_entries)
+ break;
+ e1000_rar_set(hw, ha->addr, i++);
+ }
+
+ WARN_ON(i == rar_entries);
+
mc_ptr = netdev->mc_list;
- for (i = 1; i < rar_entries; i++) {
- if (uc_ptr) {
- e1000_rar_set(hw, uc_ptr->da_addr, i);
- uc_ptr = uc_ptr->next;
- } else if (mc_ptr) {
+ for (; i < rar_entries; i++) {
+ if (mc_ptr) {
e1000_rar_set(hw, mc_ptr->da_addr, i);
mc_ptr = mc_ptr->next;
} else {
E1000_WRITE_FLUSH();
}
}
- WARN_ON(uc_ptr != NULL);
-
- /* clear the old settings from the multicast hash table */
-
- for (i = 0; i < mta_reg_count; i++) {
- E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
- E1000_WRITE_FLUSH();
- }
/* load any remaining addresses into the hash table */
for (; mc_ptr; mc_ptr = mc_ptr->next) {
+ u32 hash_reg, hash_bit, mta;
hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
- e1000_mta_set(hw, hash_value);
+ hash_reg = (hash_value >> 5) & 0x7F;
+ hash_bit = hash_value & 0x1F;
+ mta = (1 << hash_bit);
+ mcarray[hash_reg] |= mta;
+ }
+
+ /* write the hash table completely, from the bottom up; this avoids
+ * both write-combining chipset problems and flushing each write */
+ for (i = mta_reg_count - 1; i >= 0 ; i--) {
+ /*
+ * The 82544 has an errata where writing odd offsets
+ * overwrites the previous even offset, but writing
+ * backwards over the range works around it by always
+ * writing the odd offset first
+ */
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
}
+ E1000_WRITE_FLUSH();
if (hw->mac_type == e1000_82542_rev2_0)
e1000_leave_82542_rst(adapter);
+
+ kfree(mcarray);
}
/* Need to wait a few seconds after link up to get diagnostic information from
&adapter->link_duplex);
ctrl = er32(CTRL);
- DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
+ printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
"Full Duplex" : "Half Duplex",
((ctrl & E1000_CTRL_TFCE) && (ctrl &
E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
ew32(TCTL, tctl);
netif_carrier_on(netdev);
- netif_wake_queue(netdev);
mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
adapter->smartspeed = 0;
} else {
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- DPRINTK(LINK, INFO, "NIC Link is Down\n");
+ printk(KERN_INFO "e1000: %s NIC Link is Down\n",
+ netdev->name);
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
/* 80003ES2LAN workaround--
* (Do the reset outside of interrupt context). */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
}
struct e1000_buffer *buffer_info;
unsigned int i;
u8 css;
+ u32 cmd_len = E1000_TXD_CMD_DEXT;
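+ /* every checksum context uses DEXT; the TCP bit is OR'd in below for TCP */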
- if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- css = skb_transport_offset(skb);
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return false;
- i = tx_ring->next_to_use;
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+ switch (skb->protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ /* XXX not handling all IPV6 headers */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit()))
+ DPRINTK(DRV, WARNING,
+ "checksum_partial proto=%x!\n", skb->protocol);
+ break;
+ }
- context_desc->lower_setup.ip_config = 0;
- context_desc->upper_setup.tcp_fields.tucss = css;
- context_desc->upper_setup.tcp_fields.tucso =
- css + skb->csum_offset;
- context_desc->upper_setup.tcp_fields.tucse = 0;
- context_desc->tcp_seg_setup.data = 0;
- context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
+ css = skb_transport_offset(skb);
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
+ i = tx_ring->next_to_use;
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
- if (unlikely(++i == tx_ring->count)) i = 0;
- tx_ring->next_to_use = i;
+ context_desc->lower_setup.ip_config = 0;
+ context_desc->upper_setup.tcp_fields.tucss = css;
+ context_desc->upper_setup.tcp_fields.tucso =
+ css + skb->csum_offset;
+ context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_len);
- return true;
- }
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
- return false;
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
}
#define E1000_MAX_TXD_PWR 12
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_buffer *buffer_info;
- unsigned int len = skb->len;
- unsigned int offset = 0, size, count = 0, i;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset, size, count = 0, i;
unsigned int f;
- len -= skb->data_len;
+ dma_addr_t *map;
i = tx_ring->next_to_use;
+ if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+ dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+ return 0;
+ }
+
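+ /* skb_dma_map() left the head mapping in dma_head and one entry per
+ * page fragment in dma_maps[] */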
+ map = skb_shinfo(skb)->dma_maps;
+ offset = 0;
+
while (len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
size -= 4;
buffer_info->length = size;
- buffer_info->dma =
- pci_map_single(adapter->pdev,
- skb->data + offset,
- size,
- PCI_DMA_TODEVICE);
+ buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
len -= size;
offset += size;
count++;
- if (unlikely(++i == tx_ring->count)) i = 0;
+ if (len) {
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+ }
}
for (f = 0; f < nr_frags; f++) {
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
- offset = frag->page_offset;
+ offset = 0;
while (len) {
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
/* Workaround for premature desc write-backs
size -= 4;
buffer_info->length = size;
- buffer_info->dma =
- pci_map_page(adapter->pdev,
- frag->page,
- offset,
- size,
- PCI_DMA_TODEVICE);
+ buffer_info->dma = map[f] + offset;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
len -= size;
offset += size;
count++;
- if (unlikely(++i == tx_ring->count)) i = 0;
}
}
- i = (i == 0) ? tx_ring->count - 1 : i - 1;
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
unsigned int len = skb->len - skb->data_len;
- unsigned long flags;
unsigned int nr_frags;
unsigned int mss;
int count = 0;
(hw->mac_type == e1000_82573))
e1000_transfer_dhcp_info(adapter, skb);
- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
-
/* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time */
- if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY;
- }
if (unlikely(hw->mac_type == e1000_82547)) {
if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
netif_stop_queue(netdev);
mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_BUSY;
}
}
tso = e1000_tso(adapter, tx_ring, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
if (likely(skb->protocol == htons(ETH_P_IP)))
tx_flags |= E1000_TX_FLAGS_IPV4;
- e1000_tx_queue(adapter, tx_ring, tx_flags,
- e1000_tx_map(adapter, tx_ring, skb, first,
- max_per_txd, nr_frags, mss));
+ count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
+ nr_frags, mss);
- netdev->trans_start = jiffies;
+ if (count) {
+ e1000_tx_queue(adapter, tx_ring, tx_flags, count);
+ /* Make sure there is space in the ring for the next send. */
+ e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
- /* Make sure there is space in the ring for the next send. */
- e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+ } else {
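+ /* the DMA map in e1000_tx_map() failed; drop the skb and
+ * rewind the ring to its pre-mapping state */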
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
+ }
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
switch (hw->mac_type) {
case e1000_undefined ... e1000_82542_rev2_1:
case e1000_ich8lan:
- if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
return -EINVAL;
}
&eeprom_data);
if ((hw->device_id != E1000_DEV_ID_82573L) ||
(eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
- if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
DPRINTK(PROBE, ERR,
"Jumbo Frames not supported.\n");
return -EINVAL;
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
- * larger slab size
- * i.e. RXBUFFER_2048 --> size-4096 slab */
+ * larger slab size.
+ * i.e. RXBUFFER_2048 --> size-4096 slab
+ * however with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs */
if (max_frame <= E1000_RXBUFFER_256)
adapter->rx_buffer_len = E1000_RXBUFFER_256;
adapter->rx_buffer_len = E1000_RXBUFFER_1024;
else if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
- else if (max_frame <= E1000_RXBUFFER_4096)
- adapter->rx_buffer_len = E1000_RXBUFFER_4096;
- else if (max_frame <= E1000_RXBUFFER_8192)
- adapter->rx_buffer_len = E1000_RXBUFFER_8192;
- else if (max_frame <= E1000_RXBUFFER_16384)
+ else
+#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
+ adapter->rx_buffer_len = PAGE_SIZE;
+#endif
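+ /* jumbo receives use page-sized buffers, so rx_buffer_len is capped
+ * at PAGE_SIZE (16KB at most) */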
/* adjust allocation if LPE protects us, and we aren't using SBP */
if (!hw->tbi_compatibility_on &&
- ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
+ ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(netdev, &adapter->napi);
+ __napi_schedule(&adapter->napi);
} else
e1000_irq_enable(adapter);
struct e1000_hw *hw = &adapter->hw;
u32 rctl, icr = er32(ICR);
- if (unlikely(!icr))
+ if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
return IRQ_NONE; /* Not our interrupt */
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
ew32(IMC, ~0);
E1000_WRITE_FLUSH();
}
- if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(netdev, &adapter->napi);
- } else
+ __napi_schedule(&adapter->napi);
+ } else {
/* this really should not happen! if it does it is basically a
* bug, but not a hard error, so enable ints and continue */
- e1000_irq_enable(adapter);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+ }
return IRQ_HANDLED;
}
struct net_device *poll_dev = adapter->netdev;
int tx_cleaned = 0, work_done = 0;
- /* Must NOT use netdev_priv macro here. */
- adapter = poll_dev->priv;
+ adapter = netdev_priv(poll_dev);
- /* e1000_clean is called per-cpu. This lock protects
- * tx_ring[0] from being cleaned by multiple cpus
- * simultaneously. A failure obtaining the lock means
- * tx_ring[0] is currently being cleaned anyway. */
- if (spin_trylock(&adapter->tx_queue_lock)) {
- tx_cleaned = e1000_clean_tx_irq(adapter,
- &adapter->tx_ring[0]);
- spin_unlock(&adapter->tx_queue_lock);
- }
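+ /* with a single tx queue this NAPI poll is the only tx cleaner,
+ * so no lock is needed */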
+ tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
adapter->clean_rx(adapter, &adapter->rx_ring[0],
&work_done, budget);
- if (tx_cleaned)
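+ /* if tx is not fully cleaned, claim the whole budget so NAPI
+ * keeps polling */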
+ if (!tx_cleaned)
work_done = budget;
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
if (likely(adapter->itr_setting & 3))
e1000_set_itr(adapter);
- netif_rx_complete(poll_dev, napi);
- e1000_irq_enable(adapter);
+ napi_complete(napi);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
}
return work_done;
struct e1000_buffer *buffer_info;
unsigned int i, eop;
unsigned int count = 0;
- bool cleaned = false;
unsigned int total_tx_bytes=0, total_tx_packets=0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
- while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
- for (cleaned = false; !cleaned; ) {
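+ /* bound one cleaning pass by the ring size to avoid endless cleanup */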
+ while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ bool cleaned = false;
+ for ( ; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
-#define E1000_TX_WEIGHT 64
- /* weight of a sort for tx, to avoid endless transmit cleanup */
- if (count++ == E1000_TX_WEIGHT)
- break;
}
tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD 32
- if (unlikely(cleaned && netif_carrier_ok(netdev) &&
+ if (unlikely(count && netif_carrier_ok(netdev) &&
E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = false;
- if (tx_ring->buffer_info[eop].dma &&
- time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+ if (tx_ring->buffer_info[i].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
(adapter->tx_timeout_factor * HZ))
&& !(er32(STATUS) & E1000_STATUS_TXOFF)) {
readl(hw->hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
+ tx_ring->buffer_info[i].time_stamp,
eop,
jiffies,
eop_desc->upper.fields.status);
adapter->total_tx_packets += total_tx_packets;
adapter->net_stats.tx_bytes += total_tx_bytes;
adapter->net_stats.tx_packets += total_tx_packets;
- return cleaned;
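+ /* true means the pass finished without hitting the ring-size bound */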
+ return (count < tx_ring->count);
}
/**
}
/**
+ * e1000_consume_page - helper function
+ **/
+static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+ u16 length)
+{
+ bi->page = NULL;
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+}
+
+/**
+ * e1000_receive_skb - helper function to handle rx indications
+ * @adapter: board private structure
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @skb: pointer to sk_buff to be indicated to stack
+ */
+static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
+ __le16 vlan, struct sk_buff *skb)
+{
+ if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) {
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+ le16_to_cpu(vlan) &
+ E1000_RXD_SPC_VLAN_MASK);
+ } else {
+ netif_receive_skb(skb);
+ }
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ * @rx_ring: ring to clean
+ * @work_done: amount of napi work completed this call
+ * @work_to_do: max amount of work allowed for this call to do
+ *
+ * the return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned
+ */
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ unsigned long irq_flags;
+ u32 length;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes=0, total_rx_packets=0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (rx_desc->status & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+ u8 status;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ status = rx_desc->status;
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ if (++i == rx_ring->count) i = 0;
+ next_rxd = E1000_RX_DESC(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->length);
+
+ /* errors is only valid for DD + EOP descriptors */
+ if (unlikely((status & E1000_RXD_STAT_EOP) &&
+ (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
+ u8 last_byte = *(skb->data + length - 1);
+ if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
+ last_byte)) {
+ spin_lock_irqsave(&adapter->stats_lock,
+ irq_flags);
+ e1000_tbi_adjust_stats(hw, &adapter->stats,
+ length, skb->data);
+ spin_unlock_irqrestore(&adapter->stats_lock,
+ irq_flags);
+ length--;
+ } else {
+ /* recycle both page and skb */
+ buffer_info->skb = skb;
+ /* an error means any chain goes out the window
+ * too */
+ if (rx_ring->rx_skb_top)
+ dev_kfree_skb(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ goto next_desc;
+ }
+ }
+
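+/* shorthand for the ring's in-progress chained skb */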
+#define rxtop rx_ring->rx_skb_top
+ if (!(status & E1000_RXD_STAT_EOP)) {
+ /* this descriptor is only the beginning (or middle) */
+ if (!rxtop) {
+ /* this is the beginning of a chain */
+ rxtop = skb;
+ skb_fill_page_desc(rxtop, 0, buffer_info->page,
+ 0, length);
+ } else {
+ /* this is the middle of a chain */
+ skb_fill_page_desc(rxtop,
+ skb_shinfo(rxtop)->nr_frags,
+ buffer_info->page, 0, length);
+ /* re-use the skb, only consumed the page */
+ buffer_info->skb = skb;
+ }
+ e1000_consume_page(buffer_info, rxtop, length);
+ goto next_desc;
+ } else {
+ if (rxtop) {
+ /* end of the chain */
+ skb_fill_page_desc(rxtop,
+ skb_shinfo(rxtop)->nr_frags,
+ buffer_info->page, 0, length);
+ /* re-use the current skb, we only consumed the
+ * page */
+ buffer_info->skb = skb;
+ skb = rxtop;
+ rxtop = NULL;
+ e1000_consume_page(buffer_info, skb, length);
+ } else {
+ /* no chain, got EOP, this buf is the packet;
+ * copybreak to save the put_page/alloc_page */
+ if (length <= copybreak &&
+ skb_tailroom(skb) >= length) {
+ u8 *vaddr;
+ vaddr = kmap_atomic(buffer_info->page,
+ KM_SKB_DATA_SOFTIRQ);
+ memcpy(skb_tail_pointer(skb), vaddr, length);
+ kunmap_atomic(vaddr,
+ KM_SKB_DATA_SOFTIRQ);
+ /* re-use the page, so don't erase
+ * buffer_info->page */
+ skb_put(skb, length);
+ } else {
+ skb_fill_page_desc(skb, 0,
+ buffer_info->page, 0,
+ length);
+ e1000_consume_page(buffer_info, skb,
+ length);
+ }
+ }
+ }
+
+ /* Receive Checksum Offload XXX recompute due to CRC strip? */
+ e1000_rx_checksum(adapter,
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
+
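+ /* the hardware left the 4-byte CRC on the frame; trim it off */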
+ pskb_trim(skb, skb->len - 4);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /* eth type trans needs skb->data to point to something */
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ DPRINTK(DRV, ERR, "pskb_may_pull failed.\n");
+ dev_kfree_skb(skb);
+ goto next_desc;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ e1000_receive_skb(adapter, status, rx_desc->special, skb);
+
+next_desc:
+ rx_desc->status = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = E1000_DESC_UNUSED(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
* e1000_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
- **/
+ * @rx_ring: ring to clean
+ * @work_done: amount of napi work completed this call
+ * @work_to_do: max amount of work allowed for this call to do
+ */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do)
struct e1000_buffer *buffer_info, *next_buffer;
unsigned long flags;
u32 length;
- u8 last_byte;
unsigned int i;
int cleaned_count = 0;
bool cleaned = false;
cleaned = true;
cleaned_count++;
- pci_unmap_single(pdev,
- buffer_info->dma,
- buffer_info->length,
+ pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
-
- if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+ /* !EOP means multiple descriptors were used to store a single
+ * packet; also make sure the frame isn't just the CRC */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
}
if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
- last_byte = *(skb->data + length - 1);
+ u8 last_byte = *(skb->data + length - 1);
if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags);
skb->protocol = eth_type_trans(skb, netdev);
- if (unlikely(adapter->vlgrp &&
- (status & E1000_RXD_STAT_VP))) {
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->special));
- } else {
- netif_receive_skb(skb);
- }
-
- netdev->last_rx = jiffies;
+ e1000_receive_skb(adapter, status, rx_desc->special, skb);
next_desc:
rx_desc->status = 0;
}
/**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to receive ring structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+
+static void
+e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring, int cleaned_count)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = 256 -
+ 16 /*for skb_reserve */ -
+ NET_IP_ALIGN;
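+ /* only a small skb is needed here; the packet data itself is
+ * carried in the page attached below */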
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto check_page;
+ }
+
+ skb = netdev_alloc_skb(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ struct sk_buff *oldskb = skb;
+ DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
+ "at %p\n", bufsz, skb->data);
+ /* Try again, without freeing the previous */
+ skb = netdev_alloc_skb(netdev, bufsz);
+ /* Failed allocation, critical failure */
+ if (!skb) {
+ dev_kfree_skb(oldskb);
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ /* give up */
+ dev_kfree_skb(skb);
+ dev_kfree_skb(oldskb);
+ break; /* while (cleaned_count--) */
+ }
+
+ /* Use new allocation */
+ dev_kfree_skb(oldskb);
+ }
+ /* Make buffer alignment 2 beyond a 16 byte boundary;
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+check_page:
+ /* allocate a new page if necessary */
+ if (!buffer_info->page) {
+ buffer_info->page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!buffer_info->page)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ }
+
+ if (!buffer_info->dma)
+ buffer_info->dma = pci_map_page(pdev,
+ buffer_info->page, 0,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+ }
+}
+
+/**
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
**/
/* Failed allocation, critical failure */
if (!skb) {
dev_kfree_skb(oldskb);
+ adapter->alloc_rx_buff_failed++;
break;
}
/* give up */
dev_kfree_skb(skb);
dev_kfree_skb(oldskb);
+ adapter->alloc_rx_buff_failed++;
break; /* while !buffer_info->skb */
}
map_skb:
buffer_info->dma = pci_map_single(pdev,
skb->data,
- adapter->rx_buffer_len,
+ buffer_info->length,
PCI_DMA_FROMDEVICE);
+ /*
+ * XXX if it was allocated cleanly it will never map to a
+ * boundary crossing
+ */
+
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter,
(void *)(unsigned long)buffer_info->dma,
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
+ adapter->alloc_rx_buff_failed++;
break; /* while !buffer_info->skb */
}
rx_desc = E1000_RX_DESC(*rx_ring, i);
return 0;
}
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
ew32(WUC, E1000_WUC_PME_EN);
ew32(WUFC, wufc);
- pci_enable_wake(pdev, PCI_D3hot, 1);
- pci_enable_wake(pdev, PCI_D3cold, 1);
} else {
ew32(WUC, 0);
ew32(WUFC, 0);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
}
e1000_release_manageability(adapter);
+ *enable_wake = !!wufc;
+
/* make sure adapter isn't asleep if manageability is enabled */
- if (adapter->en_mng_pt) {
- pci_enable_wake(pdev, PCI_D3hot, 1);
- pci_enable_wake(pdev, PCI_D3cold, 1);
- }
+ if (adapter->en_mng_pt)
+ *enable_wake = true;
if (hw->phy_type == e1000_phy_igp_3)
e1000_phy_powerdown_workaround(hw);
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
#ifdef CONFIG_PM
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+ bool wake;
+
+ retval = __e1000_shutdown(pdev, &wake);
+ if (retval)
+ return retval;
+
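+ /* if WoL is armed, let the PCI core arm PME and pick the sleep
+ * state; otherwise drop to D3hot with wake disabled */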
+ if (wake) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+
static int e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
static void e1000_shutdown(struct pci_dev *pdev)
{
- e1000_suspend(pdev, PMSG_SUSPEND);
+ bool wake;
+
+ __e1000_shutdown(pdev, &wake);
+
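+ /* arm wake and power down only on a real power-off; a reboot
+ * leaves the device untouched */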
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
}
#ifdef CONFIG_NET_POLL_CONTROLLER
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000_down(adapter);
pci_disable_device(pdev);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int err;
static void e1000_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
e1000_init_manageability(adapter);