/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
u32 reg = ioread32(addr);
u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
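+ /* assumption noted: bail out before touching the CSR once an EEH error has been flagged */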
+ if (adapter->eeh_err)
+ return;
+
if (!enabled && enable)
reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
else if (enabled && !enable)
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
if (arm)
val |= 1 << DB_EQ_REARM_SHIFT;
if (clear_int)
{
u32 val = 0;
val |= qid & DB_CQ_RING_ID_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
if (arm)
val |= 1 << DB_CQ_REARM_SHIFT;
val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
struct net_device_stats *dev_stats = &adapter->netdev->stats;
struct be_erx_stats *erx_stats = &hw_stats->erx;
- dev_stats->rx_packets = port_stats->rx_total_frames;
- dev_stats->tx_packets = port_stats->tx_unicastframes +
- port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
- dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
- (u64) port_stats->rx_bytes_lsd;
- dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
- (u64) port_stats->tx_bytes_lsd;
+ dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
+ dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
+ dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
+ dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
/* bad pkts received */
dev_stats->rx_errors = port_stats->rx_crc_errors +
}
static void be_tx_stats_update(struct be_adapter *adapter,
- u32 wrb_cnt, u32 copied, bool stopped)
+ u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
stats->be_tx_reqs++;
stats->be_tx_wrbs += wrb_cnt;
stats->be_tx_bytes += copied;
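+ /* a GSO skb accounts for gso_segs packets; a non-GSO skb counts as one */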
+ stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
if (stopped)
stats->be_tx_stops++;
}
be_txq_notify(adapter, txq->id, wrb_cnt);
- be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
+ be_tx_stats_update(adapter, wrb_cnt, copied,
+ skb_shinfo(skb)->gso_segs, stopped);
} else {
txq->head = start;
dev_kfree_skb_any(skb);
{
struct be_adapter *adapter = netdev_priv(netdev);
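+ /* BE_MAX_JUMBO_FRAME_SIZE covers the whole frame; the MTU excludes the Ethernet header and FCS */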
if (new_mtu < BE_MIN_MTU ||
- new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN))) {
dev_info(&adapter->pdev->dev,
"MTU must be between %d and %d bytes\n",
- BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
return -EINVAL;
}
dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
}
/*
- * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured,
- * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
- * set the BE in promiscuous VLAN mode.
+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
+ * If the user configures more, place BE in vlan promiscuous mode.
*/
static int be_vid_config(struct be_adapter *adapter)
{
u16 vtag[BE_NUM_VLANS_SUPPORTED];
u16 ntags = 0, i;
- int status;
+ int status = 0;
- if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
+ if (adapter->vlans_added <= adapter->max_vlans) {
/* Construct VLAN Table to give to HW */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
if (adapter->vlan_tag[i]) {
{
struct be_adapter *adapter = netdev_priv(netdev);
- adapter->num_vlans++;
adapter->vlan_tag[vid] = 1;
-
- be_vid_config(adapter);
+ adapter->vlans_added++;
+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
+ be_vid_config(adapter);
}
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
- adapter->num_vlans--;
adapter->vlan_tag[vid] = 0;
-
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
- be_vid_config(adapter);
+ adapter->vlans_added--;
+ if (adapter->vlans_added <= adapter->max_vlans)
+ be_vid_config(adapter);
}
static void be_set_multicast_list(struct net_device *netdev)
/* Enable multicast promisc if num configured exceeds what we support */
if (netdev->flags & IFF_ALLMULTI ||
netdev_mc_count(netdev) > BE_MAX_MC) {
- be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
+ be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
&adapter->mc_cmd_mem);
goto done;
}
- be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
- netdev_mc_count(netdev), &adapter->mc_cmd_mem);
+ be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
+ &adapter->mc_cmd_mem);
done:
return;
}
stats->be_rx_compl++;
stats->be_rx_frags += numfrags;
stats->be_rx_bytes += pktsize;
+ stats->be_rx_pkts++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
- if (rx_page_info->last_page_user)
+ if (rx_page_info->last_page_user) {
pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
adapter->big_page_size, PCI_DMA_FROMDEVICE);
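+ /* clear the flag explicitly; page_info is no longer zeroed wholesale after use */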
+ rx_page_info->last_page_user = false;
+ }
atomic_dec(&rxq->used);
return rx_page_info;
* indicated by rxcp.
*/
static void skb_fill_rx_data(struct be_adapter *adapter,
- struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
+ struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
+ u16 num_rcvd)
{
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct be_rx_page_info *page_info;
- u16 rxq_idx, i, num_rcvd, j;
+ u16 rxq_idx, i, j;
u32 pktsize, hdr_len, curr_frag_len, size;
u8 *start;
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
page_info = get_rx_page_info(adapter, rxq_idx);
skb->data_len = curr_frag_len - hdr_len;
skb->tail += hdr_len;
}
- memset(page_info, 0, sizeof(*page_info));
+ page_info->page = NULL;
if (pktsize <= rx_frag_size) {
BUG_ON(num_rcvd != 1);
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
- memset(page_info, 0, sizeof(*page_info));
+ page_info->page = NULL;
}
BUG_ON(j > MAX_SKB_FRAGS);
{
struct sk_buff *skb;
u32 vlanf, vid;
+ u16 num_rcvd;
u8 vtm;
- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
-
- /* vlanf could be wrongly set in some cards.
- * ignore if vtm is not set */
- if ((adapter->cap & 0x400) && !vtm)
- vlanf = 0;
+ num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
+ /* Is this a flush compl that carries no data? */
+ if (unlikely(num_rcvd == 0))
+ return;
skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
- if (!skb) {
+ if (unlikely(!skb)) {
if (net_ratelimit())
dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
be_rx_compl_discard(adapter, rxcp);
return;
}
- skb_fill_rx_data(adapter, skb, rxcp);
+ skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);
if (do_pkt_csum(rxcp, adapter->rx_csum))
skb->ip_summed = CHECKSUM_NONE;
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->dev = adapter->netdev;
- if (vlanf) {
- if (!adapter->vlan_grp || adapter->num_vlans == 0) {
+ vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+ vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+ /* vlanf could be wrongly set in some cards;
+ * ignore it if vtm is not set */
+ if ((adapter->cap & 0x400) && !vtm)
+ vlanf = 0;
+
+ if (unlikely(vlanf)) {
+ if (!adapter->vlan_grp || adapter->vlans_added == 0) {
kfree_skb(skb);
return;
}
u8 vtm;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
+ /* Is this a flush compl that carries no data? */
+ if (unlikely(num_rcvd == 0))
+ return;
+
pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
vid = be16_to_cpu(vid);
- if (!adapter->vlan_grp || adapter->num_vlans == 0)
+ if (!adapter->vlan_grp || adapter->vlans_added == 0)
return;
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_tx_compl *txcp;
u16 end_idx, cmpl = 0, timeo = 0;
+ struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
+ struct sk_buff *sent_skb;
+ bool dummy_wrb;
/* Wait for a max of 200ms for all the tx-completions to arrive. */
do {
if (atomic_read(&txq->used))
dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
atomic_read(&txq->used));
+
+ /* free posted tx for which compls will never arrive */
+ while (atomic_read(&txq->used)) {
+ sent_skb = sent_skbs[txq->tail];
+ end_idx = txq->tail;
+ index_adv(&end_idx,
+ wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
+ be_tx_compl_process(adapter, end_idx);
+ }
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
q = &adapter->rx_obj.q;
if (q->created) {
be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
+
+ /* After the rxq is invalidated, wait for a grace period
+ * of 1ms for all DMA to end and the flush compl to arrive
+ */
+ mdelay(1);
be_rx_q_clean(adapter);
}
be_queue_free(adapter, q);
/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
- return eq_id - 8 * be_pci_func(adapter);
+ return eq_id % 8;
}
static irqreturn_t be_intx(int irq, void *dev)
return work_done;
}
-void be_process_tx(struct be_adapter *adapter)
+/* As TX and MCC share the same EQ, check for both TX and MCC completions.
+ * For TX/MCC we don't honour the budget; consume everything
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
+ struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter =
+ container_of(tx_eq, struct be_adapter, tx_eq);
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp;
- u32 num_cmpl = 0;
+ int tx_compl = 0, mcc_compl, status = 0;
u16 end_idx;
while ((txcp = be_tx_compl_get(tx_cq))) {
end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp);
+ wrb_index, txcp);
be_tx_compl_process(adapter, end_idx);
- num_cmpl++;
+ tx_compl++;
+ }
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ napi_complete(napi);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
}
- if (num_cmpl) {
- be_cq_notify(adapter, tx_cq->id, true, num_cmpl);
+ if (tx_compl) {
+ be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
/* As Tx wrbs have been freed up, wake up netdev queue if
* it was stopped due to lack of tx wrbs.
}
drvr_stats(adapter)->be_tx_events++;
- drvr_stats(adapter)->be_tx_compl += num_cmpl;
+ drvr_stats(adapter)->be_tx_compl += tx_compl;
}
-}
-
-/* As TX and MCC share the same EQ check for both TX and MCC completions.
- * For TX/MCC we don't honour budget; consume everything
- */
-static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
-{
- struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(tx_eq, struct be_adapter, tx_eq);
-
- napi_complete(napi);
-
- be_process_tx(adapter);
-
- be_process_mcc(adapter);
return 1;
}
/* Rx compl queue may be in unarmed state; rearm it */
be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
+ /* Now that interrupts are on, we can process async mcc */
+ be_async_mcc_enable(adapter);
+
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
&link_speed);
if (status)
cancel_delayed_work_sync(&adapter->work);
+ be_async_mcc_disable(adapter);
+
netif_stop_queue(netdev);
netif_carrier_off(netdev);
adapter->link_up = false;
const u8 *p = fw->data;
struct be_cmd_write_flashrom *req = flash_cmd->va;
struct flash_comp *pflashcomp;
+ int num_comp;
- struct flash_comp gen3_flash_types[8] = {
+ struct flash_comp gen3_flash_types[9] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
FLASH_IMAGE_MAX_SIZE_g3},
{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
FLASH_IMAGE_MAX_SIZE_g3},
{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
- FLASH_IMAGE_MAX_SIZE_g3}
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
+ FLASH_NCSI_IMAGE_MAX_SIZE_g3}
};
struct flash_comp gen2_flash_types[8] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
if (adapter->generation == BE_GEN3) {
pflashcomp = gen3_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g3);
+ num_comp = 9;
} else {
pflashcomp = gen2_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g2);
+ num_comp = 8;
}
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < num_comp; i++) {
+ if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
+ memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
+ continue;
if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
(!be_flash_redboot(adapter, fw->data,
pflashcomp[i].offset, pflashcomp[i].size,
struct be_dma_mem flash_cmd;
int status, i = 0;
const u8 *p;
- char fw_ver[FW_VER_LEN];
- char fw_cfg;
-
- status = be_cmd_get_fw_ver(adapter, fw_ver);
- if (status)
- return status;
- fw_cfg = *(fw_ver + 2);
- if (fw_cfg == '0')
- fw_cfg = '1';
strcpy(fw_file, func);
status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
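+ /* save config space so pci_restore_state() has a valid state during EEH recovery */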
+ pci_save_state(adapter->pdev);
return 0;
free_mbox:
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ if (adapter->cap & 0x400)
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
+ else
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+
return 0;
}
return 0;
}
+/*
+ * An FLR will stop BE from DMAing any data.
+ */
+static void be_shutdown(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ be_cmd_reset_function(adapter);
+
+ if (adapter->wol)
+ be_setup_wol(adapter, true);
+
+ pci_disable_device(pdev);
+
+ return;
+}
+
+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
+
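+ /* the doorbell and interrupt-control helpers check this flag and skip further CSR access */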
+ adapter->eeh_err = true;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_close(netdev);
+ rtnl_unlock();
+ }
+ be_clear(adapter);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ int status;
+
+ dev_info(&adapter->pdev->dev, "EEH reset\n");
+ adapter->eeh_err = false;
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
+ /* Check if card is ok and fw is ready */
+ status = be_cmd_POST(adapter);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void be_eeh_resume(struct pci_dev *pdev)
+{
+ int status = 0;
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_info(&adapter->pdev->dev, "EEH resume\n");
+
+ pci_save_state(pdev);
+
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ goto err;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(netdev)) {
+ status = be_open(netdev);
+ if (status)
+ goto err;
+ }
+ netif_device_attach(netdev);
+ return;
+err:
+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+ return;
+}
+
+static struct pci_error_handlers be_eeh_handlers = {
+ .error_detected = be_eeh_err_detected,
+ .slot_reset = be_eeh_reset,
+ .resume = be_eeh_resume,
+};
+
static struct pci_driver be_driver = {
.name = DRV_NAME,
.id_table = be_dev_ids,
.probe = be_probe,
.remove = be_remove,
.suspend = be_suspend,
- .resume = be_resume
+ .resume = be_resume,
+ .shutdown = be_shutdown,
+ .err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)