include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[safe/jmp/linux-2.6] / drivers / scsi / lpfc / lpfc_init.c
index e9e4a1d..774663e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -28,6 +28,8 @@
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/ctype.h>
+#include <linux/aer.h>
+#include <linux/slab.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -108,7 +110,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
                return -ENOMEM;
        }
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
 
        if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -211,7 +213,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
                goto out_free_mbox;
 
        do {
-               lpfc_dump_mem(phba, pmb, offset);
+               lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 
                if (rc != MBX_SUCCESS) {
@@ -221,6 +223,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
                                        mb->mbxCommand, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
+               /* dump mem may return a zero when finished or we got a
+                * mailbox error, either way we are done.
+                */
+               if (mb->un.varDmp.word_cnt == 0)
+                       break;
                if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
                        mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -249,7 +256,7 @@ out_free_mbox:
 static void
 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 {
-       if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
+       if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
                phba->temp_sensor_support = 1;
        else
                phba->temp_sensor_support = 0;
@@ -276,7 +283,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
        /* character array used for decoding dist type. */
        char dist_char[] = "nabx";
 
-       if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
+       if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
                mempool_free(pmboxq, phba->mbox_mem_pool);
                return;
        }
@@ -284,7 +291,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
        prg = (struct prog_id *) &prog_id_word;
 
        /* word 7 contain option rom version */
-       prog_id_word = pmboxq->mb.un.varWords[7];
+       prog_id_word = pmboxq->u.mb.un.varWords[7];
 
        /* Decode the Option rom version word to a readable string */
        if (prg->dist < 4)
@@ -341,10 +348,15 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
 
        /* Get login parameters for NID.  */
-       lpfc_read_sparam(phba, pmb, 0);
+       rc = lpfc_read_sparam(phba, pmb, 0);
+       if (rc) {
+               mempool_free(pmb, phba->mbox_mem_pool);
+               return -ENOMEM;
+       }
+
        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -353,7 +365,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mp = (struct lpfc_dmabuf *) pmb->context1;
-               mempool_free( pmb, phba->mbox_mem_pool);
+               mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                return -EIO;
@@ -380,6 +392,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+       fc_host_max_npiv_vports(shost) = phba->max_vpi;
 
        /* If no serial number in VPD data, use low 6 bytes of WWNN */
        /* This should be consolidated into parse_vpd ? - mr */
@@ -419,10 +432,14 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                return -EIO;
        }
 
+       /* Check if the port is disabled */
+       lpfc_sli_read_link_ste(phba);
+
        /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
        if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
                phba->cfg_hba_queue_depth =
-                       mb->un.varRdConfig.max_xri + 1;
+                       (mb->un.varRdConfig.max_xri + 1) -
+                                       lpfc_sli4_get_els_iocb_cnt(phba);
 
        phba->lmt = mb->un.varRdConfig.lmt;
 
@@ -476,17 +493,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0352 Config MSI mailbox command "
                                        "failed, mbxCmd x%x, mbxStatus x%x\n",
-                                       pmb->mb.mbxCommand, pmb->mb.mbxStatus);
+                                       pmb->u.mb.mbxCommand,
+                                       pmb->u.mb.mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }
 
+       spin_lock_irq(&phba->hbalock);
        /* Initialize ERATT handling flag */
        phba->hba_flag &= ~HBA_ERATT_HANDLED;
 
        /* Enable appropriate host interrupts */
-       spin_lock_irq(&phba->hbalock);
        status = readl(phba->HCregaddr);
        status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
        if (psli->num_rings > 0)
@@ -516,30 +534,54 @@ lpfc_config_port_post(struct lpfc_hba *phba)
        /* Set up error attention (ERATT) polling timer */
        mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
 
-       lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
-       pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-       lpfc_set_loopback_flag(phba);
-       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-       if (rc != MBX_SUCCESS) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+       if (phba->hba_flag & LINK_DISABLED) {
+               lpfc_printf_log(phba,
+                       KERN_ERR, LOG_INIT,
+                       "2598 Adapter Link is disabled.\n");
+               lpfc_down_link(phba, pmb);
+               pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+               rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+               if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+                       lpfc_printf_log(phba,
+                       KERN_ERR, LOG_INIT,
+                       "2599 Adapter failed to issue DOWN_LINK"
+                       " mbox command rc 0x%x\n", rc);
+
+                       mempool_free(pmb, phba->mbox_mem_pool);
+                       return -EIO;
+               }
+       } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+               lpfc_init_link(phba, pmb, phba->cfg_topology,
+                       phba->cfg_link_speed);
+               pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+               lpfc_set_loopback_flag(phba);
+               rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+               if (rc != MBX_SUCCESS) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0454 Adapter failed to init, mbxCmd x%x "
                                "INIT_LINK, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
 
-               /* Clear all interrupt enable conditions */
-               writel(0, phba->HCregaddr);
-               readl(phba->HCregaddr); /* flush */
-               /* Clear all pending interrupts */
-               writel(0xffffffff, phba->HAregaddr);
-               readl(phba->HAregaddr); /* flush */
+                       /* Clear all interrupt enable conditions */
+                       writel(0, phba->HCregaddr);
+                       readl(phba->HCregaddr); /* flush */
+                       /* Clear all pending interrupts */
+                       writel(0xffffffff, phba->HAregaddr);
+                       readl(phba->HAregaddr); /* flush */
 
-               phba->link_state = LPFC_HBA_ERROR;
-               if (rc != MBX_BUSY)
-                       mempool_free(pmb, phba->mbox_mem_pool);
-               return -EIO;
+                       phba->link_state = LPFC_HBA_ERROR;
+                       if (rc != MBX_BUSY)
+                               mempool_free(pmb, phba->mbox_mem_pool);
+                       return -EIO;
+               }
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb) {
+               phba->link_state = LPFC_HBA_ERROR;
+               return -ENOMEM;
+       }
+
        lpfc_config_async(phba, pmb, LPFC_ELS_RING);
        pmb->mbox_cmpl = lpfc_config_async_cmpl;
        pmb->vport = phba->pport;
@@ -550,13 +592,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                                KERN_ERR,
                                LOG_INIT,
                                "0456 Adapter failed to issue "
-                               "ASYNCEVT_ENABLE mbox status x%x \n.",
+                               "ASYNCEVT_ENABLE mbox status x%x\n",
                                rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }
 
        /* Get Option rom version */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb) {
+               phba->link_state = LPFC_HBA_ERROR;
+               return -ENOMEM;
+       }
+
        lpfc_dump_wakeup_param(phba, pmb);
        pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
        pmb->vport = phba->pport;
@@ -564,7 +611,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 
        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
-                               "to get Option ROM version status x%x\n.", rc);
+                               "to get Option ROM version status x%x\n", rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }
 
@@ -572,6 +619,102 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_hba_init_link - Initialize the FC link
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *             0 - success
+ *             Any other value - error
+ **/
+int
+lpfc_hba_init_link(struct lpfc_hba *phba)
+{
+       struct lpfc_vport *vport = phba->pport;
+       LPFC_MBOXQ_t *pmb;
+       MAILBOX_t *mb;
+       int rc;
+
+       pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb) {
+               phba->link_state = LPFC_HBA_ERROR;
+               return -ENOMEM;
+       }
+       mb = &pmb->u.mb;
+       pmb->vport = vport;
+
+       lpfc_init_link(phba, pmb, phba->cfg_topology,
+               phba->cfg_link_speed);
+       pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       lpfc_set_loopback_flag(phba);
+       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+       if (rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "0498 Adapter failed to init, mbxCmd x%x "
+                       "INIT_LINK, mbxStatus x%x\n",
+                       mb->mbxCommand, mb->mbxStatus);
+               /* Clear all interrupt enable conditions */
+               writel(0, phba->HCregaddr);
+               readl(phba->HCregaddr); /* flush */
+               /* Clear all pending interrupts */
+               writel(0xffffffff, phba->HAregaddr);
+               readl(phba->HAregaddr); /* flush */
+               phba->link_state = LPFC_HBA_ERROR;
+               if (rc != MBX_BUSY)
+                       mempool_free(pmb, phba->mbox_mem_pool);
+               return -EIO;
+       }
+       phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
+
+       return 0;
+}
+
+/**
+ * lpfc_hba_down_link - this routine downs the FC link
+ * @phba: pointer to lpfc hba data structure.
+ * This routine will issue the DOWN_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use to stop the link.
+ *
+ * Return code
+ *             0 - success
+ *             Any other value - error
+ **/
+int
+lpfc_hba_down_link(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *pmb;
+       int rc;
+
+       pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb) {
+               phba->link_state = LPFC_HBA_ERROR;
+               return -ENOMEM;
+       }
+
+       lpfc_printf_log(phba,
+               KERN_ERR, LOG_INIT,
+               "0491 Adapter Link is disabled.\n");
+       lpfc_down_link(phba, pmb);
+       pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+       if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+               lpfc_printf_log(phba,
+               KERN_ERR, LOG_INIT,
+               "2522 Adapter failed to issue DOWN_LINK"
+               " mbox command rc 0x%x\n", rc);
+
+               mempool_free(pmb, phba->mbox_mem_pool);
+               return -EIO;
+       }
+       return 0;
+}
+
+/**
  * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
  * @phba: pointer to lpfc HBA data structure.
  *
@@ -615,7 +758,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
  * down the SLI Layer.
  *
  * Return codes
- *   0 - sucess.
+ *   0 - success.
  *   Any other value - error.
  **/
 static int
@@ -670,7 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
  * down the SLI Layer.
  *
  * Return codes
- *   0 - sucess.
+ *   0 - success.
  *   Any other value - error.
  **/
 static int
@@ -680,6 +823,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
        LIST_HEAD(aborts);
        int ret;
        unsigned long iflag = 0;
+       struct lpfc_sglq *sglq_entry = NULL;
+
        ret = lpfc_hba_down_post_s3(phba);
        if (ret)
                return ret;
@@ -695,6 +840,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
         * list.
         */
        spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+       list_for_each_entry(sglq_entry,
+               &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
+               sglq_entry->state = SGL_FREED;
+
        list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                        &phba->sli4_hba.lpfc_sgl_list);
        spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -725,7 +874,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
  * uninitialization after the HBA is reset when bring down the SLI Layer.
  *
  * Return codes
- *   0 - sucess.
+ *   0 - success.
  *   Any other value - error.
  **/
 int
@@ -822,12 +971,19 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 void
 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 {
+       struct lpfc_vport **vports;
        LPFC_MBOXQ_t *pmboxq;
        struct lpfc_dmabuf *buf_ptr;
-       int retval;
+       int retval, i;
        struct lpfc_sli *psli = &phba->sli;
        LIST_HEAD(completions);
 
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL)
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+                       lpfc_rcv_seq_check_edtov(vports[i]);
+       lpfc_destroy_vport_work_array(phba, vports);
+
        if ((phba->link_state == LPFC_HBA_ERROR) ||
                (phba->pport->load_flag & FC_UNLOADING) ||
                (phba->pport->fc_flag & FC_OFFLINE_MODE))
@@ -900,7 +1056,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                        "taking this port offline.\n");
 
                        spin_lock_irq(&phba->hbalock);
-                       psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+                       psli->sli_flag &= ~LPFC_SLI_ACTIVE;
                        spin_unlock_irq(&phba->hbalock);
 
                        lpfc_offline_prep(phba);
@@ -925,13 +1081,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
        struct lpfc_sli   *psli = &phba->sli;
 
        spin_lock_irq(&phba->hbalock);
-       psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+       psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
        lpfc_offline_prep(phba);
 
        lpfc_offline(phba);
        lpfc_reset_barrier(phba);
+       spin_lock_irq(&phba->hbalock);
        lpfc_sli_brdreset(phba);
+       spin_unlock_irq(&phba->hbalock);
        lpfc_hba_down_post(phba);
        lpfc_sli_brdready(phba, HS_MBRDY);
        lpfc_unblock_mgmt_io(phba);
@@ -974,6 +1132,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
        struct lpfc_sli_ring  *pring;
        struct lpfc_sli *psli = &phba->sli;
 
+       /* If the pci channel is offline, ignore possible errors,
+        * since we cannot communicate with the pci card anyway.
+        */
+       if (pci_channel_offline(phba->pcidev)) {
+               spin_lock_irq(&phba->hbalock);
+               phba->hba_flag &= ~DEFER_ERATT;
+               spin_unlock_irq(&phba->hbalock);
+               return;
+       }
+
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "0479 Deferred Adapter Hardware Error "
                "Data: x%x x%x x%x\n",
@@ -981,7 +1149,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
                phba->work_status[0], phba->work_status[1]);
 
        spin_lock_irq(&phba->hbalock);
-       psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+       psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
 
 
@@ -1091,7 +1259,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
                                phba->work_status[0], phba->work_status[1]);
 
                spin_lock_irq(&phba->hbalock);
-               psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+               psli->sli_flag &= ~LPFC_SLI_ACTIVE;
                spin_unlock_irq(&phba->hbalock);
 
                /*
@@ -1212,7 +1380,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
  * routine from the API jump table function pointer from the lpfc_hba struct.
  *
  * Return codes
- *   0 - sucess.
+ *   0 - success.
  *   Any other value - error.
  **/
 void
@@ -1479,10 +1647,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
        int GE = 0;
        int oneConnect = 0; /* default is not a oneConnect */
        struct {
-               char * name;
-               int    max_speed;
-               char * bus;
-       } m = {"<Unknown>", 0, ""};
+               char *name;
+               char *bus;
+               char *function;
+       } m = {"<Unknown>", "", ""};
 
        if (mdp && mdp[0] != '\0'
                && descp && descp[0] != '\0')
@@ -1503,136 +1671,155 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 
        switch (dev_id) {
        case PCI_DEVICE_ID_FIREFLY:
-               m = (typeof(m)){"LP6000", max_speed, "PCI"};
+               m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SUPERFLY:
                if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
-                       m = (typeof(m)){"LP7000", max_speed,  "PCI"};
+                       m = (typeof(m)){"LP7000", "PCI",
+                                       "Fibre Channel Adapter"};
                else
-                       m = (typeof(m)){"LP7000E", max_speed, "PCI"};
+                       m = (typeof(m)){"LP7000E", "PCI",
+                                       "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_DRAGONFLY:
-               m = (typeof(m)){"LP8000", max_speed, "PCI"};
+               m = (typeof(m)){"LP8000", "PCI",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_CENTAUR:
                if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
-                       m = (typeof(m)){"LP9002", max_speed, "PCI"};
+                       m = (typeof(m)){"LP9002", "PCI",
+                                       "Fibre Channel Adapter"};
                else
-                       m = (typeof(m)){"LP9000", max_speed, "PCI"};
+                       m = (typeof(m)){"LP9000", "PCI",
+                                       "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_RFLY:
-               m = (typeof(m)){"LP952", max_speed, "PCI"};
+               m = (typeof(m)){"LP952", "PCI",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_PEGASUS:
-               m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
+               m = (typeof(m)){"LP9802", "PCI-X",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_THOR:
-               m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
+               m = (typeof(m)){"LP10000", "PCI-X",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_VIPER:
-               m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
+               m = (typeof(m)){"LPX1000",  "PCI-X",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_PFLY:
-               m = (typeof(m)){"LP982", max_speed, "PCI-X"};
+               m = (typeof(m)){"LP982", "PCI-X",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_TFLY:
-               m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
+               m = (typeof(m)){"LP1050", "PCI-X",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_HELIOS:
-               m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
+               m = (typeof(m)){"LP11000", "PCI-X2",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_HELIOS_SCSP:
-               m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
+               m = (typeof(m)){"LP11000-SP", "PCI-X2",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_HELIOS_DCSP:
-               m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
+               m = (typeof(m)){"LP11002-SP",  "PCI-X2",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_NEPTUNE:
-               m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_NEPTUNE_SCSP:
-               m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_NEPTUNE_DCSP:
-               m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_BMID:
-               m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
+               m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_BSMB:
-               m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
+               m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_ZEPHYR:
-               m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_ZEPHYR_SCSP:
-               m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_ZEPHYR_DCSP:
-               m = (typeof(m)){"LP2105", max_speed, "PCIe"};
+               m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
                GE = 1;
                break;
        case PCI_DEVICE_ID_ZMID:
-               m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_ZSMB:
-               m = (typeof(m)){"LPe111", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LP101:
-               m = (typeof(m)){"LP101", max_speed, "PCI-X"};
+               m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LP10000S:
-               m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
+               m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LP11000S:
-               m = (typeof(m)){"LP11000-S", max_speed,
-                       "PCI-X2"};
+               m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LPE11000S:
-               m = (typeof(m)){"LPe11000-S", max_speed,
-                       "PCIe"};
+               m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT:
-               m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT_MID:
-               m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT_SMB:
-               m = (typeof(m)){"LPe121", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT_DCSP:
-               m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT_SCSP:
-               m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT_S:
-               m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
+               m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_HORNET:
-               m = (typeof(m)){"LP21000", max_speed, "PCIe"};
+               m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
                GE = 1;
                break;
        case PCI_DEVICE_ID_PROTEUS_VF:
-               m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+               m = (typeof(m)){"LPev12000", "PCIe IOV",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_PROTEUS_PF:
-               m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+               m = (typeof(m)){"LPev12000", "PCIe IOV",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_PROTEUS_S:
-               m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
+               m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
+                               "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_TIGERSHARK:
                oneConnect = 1;
-               m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
+               m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
                break;
-       case PCI_DEVICE_ID_TIGERSHARK_S:
+       case PCI_DEVICE_ID_TOMCAT:
                oneConnect = 1;
-               m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
+               m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
+               break;
+       case PCI_DEVICE_ID_FALCON:
+               m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
+                               "EmulexSecure Fibre"};
                break;
        default:
-               m = (typeof(m)){ NULL };
+               m = (typeof(m)){"Unknown", "", ""};
                break;
        }
 
@@ -1644,17 +1831,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
        if (descp && descp[0] == '\0') {
                if (oneConnect)
                        snprintf(descp, 255,
-                               "Emulex OneConnect %s, FCoE Initiator, Port %s",
-                               m.name,
+                               "Emulex OneConnect %s, %s Initiator, Port %s",
+                               m.name, m.function,
                                phba->Port);
                else
                        snprintf(descp, 255,
                                "Emulex %s %d%s %s %s",
-                               m.name, m.max_speed,
-                               (GE) ? "GE" : "Gb",
-                               m.bus,
-                               (GE) ? "FCoE Adapter" :
-                                       "Fibre Channel Adapter");
+                               m.name, max_speed, (GE) ? "GE" : "Gb",
+                               m.bus, m.function);
        }
 }
 
@@ -2007,6 +2191,46 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
 }
 
 /**
+ * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
+ * caller of this routine should already hold the host lock.
+ **/
+void
+__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+       /* Clear pending FCF rediscovery wait and failover in progress flags */
+       phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
+                               FCF_DEAD_DISC |
+                               FCF_ACVL_DISC);
+       /* Now, try to stop the timer */
+       del_timer(&phba->fcf.redisc_wait);
+}
+
+/**
+ * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
+ * checks whether the FCF rediscovery wait timer is pending with the host
+ * lock held before proceeding with disabling the timer and clearing the
+ * wait timer pending flag.
+ **/
+void
+lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+       spin_lock_irq(&phba->hbalock);
+       if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+               /* FCF rediscovery timer already fired or stopped */
+               spin_unlock_irq(&phba->hbalock);
+               return;
+       }
+       __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+       spin_unlock_irq(&phba->hbalock);
+}
+
+/**
  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
  * @phba: pointer to lpfc hba data structure.
  *
@@ -2030,6 +2254,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
                break;
        case LPFC_PCI_DEV_OC:
                /* Stop any OneConnect device sepcific driver timers */
+               lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2117,6 +2342,8 @@ lpfc_online(struct lpfc_hba *phba)
                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+                       if (phba->sli_rev == LPFC_SLI_REV4)
+                               vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                        spin_unlock_irq(shost->host_lock);
                }
                lpfc_destroy_vport_work_array(phba, vports);
@@ -2160,6 +2387,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_nodelist  *ndlp, *next_ndlp;
        struct lpfc_vport **vports;
+       struct Scsi_Host *shost;
        int i;
 
        if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -2173,11 +2401,15 @@ lpfc_offline_prep(struct lpfc_hba * phba)
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL) {
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
-                       struct Scsi_Host *shost;
-
                        if (vports[i]->load_flag & FC_UNLOADING)
                                continue;
-                       vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
+                       shost = lpfc_shost_from_vport(vports[i]);
+                       spin_lock_irq(shost->host_lock);
+                       vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+                       vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+                       vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
+                       spin_unlock_irq(shost->host_lock);
+
                        shost = lpfc_shost_from_vport(vports[i]);
                        list_for_each_entry_safe(ndlp, next_ndlp,
                                                 &vports[i]->fc_nodes,
@@ -2201,7 +2433,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
        }
        lpfc_destroy_vport_work_array(phba, vports);
 
-       lpfc_sli_flush_mbox_queue(phba);
+       lpfc_sli_mbox_sys_shutdown(phba);
 }
 
 /**
@@ -2268,6 +2500,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 
        spin_lock_irq(&phba->hbalock);
        /* Release all the lpfc_scsi_bufs maintained by this host. */
+       spin_lock(&phba->scsi_buf_list_lock);
        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
                list_del(&sb->list);
                pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
@@ -2275,6 +2508,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
                kfree(sb);
                phba->total_scsi_bufs--;
        }
+       spin_unlock(&phba->scsi_buf_list_lock);
 
        /* Release all the lpfc_iocbq entries maintained by this host. */
        list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2282,9 +2516,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
                kfree(io);
                phba->total_iocbq_bufs--;
        }
-
        spin_unlock_irq(&phba->hbalock);
-
        return 0;
 }
 
@@ -2333,7 +2565,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        shost->this_id = -1;
        shost->max_cmd_len = 16;
        if (phba->sli_rev == LPFC_SLI_REV4) {
-               shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
+               shost->dma_boundary =
+                       phba->sli4_hba.pc_sli4_params.sge_supp_len;
                shost->sg_tablesize = phba->cfg_sg_seg_cnt;
        }
 
@@ -2367,8 +2600,16 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        init_timer(&vport->els_tmofunc);
        vport->els_tmofunc.function = lpfc_els_timeout;
        vport->els_tmofunc.data = (unsigned long)vport;
+       if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+               phba->menlo_flag |= HBA_MENLO_SUPPORT;
+               /* check for menlo minimum sg count */
+               if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
+                       phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+                       shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+               }
+       }
 
-       error = scsi_add_host(shost, dev);
+       error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
        if (error)
                goto out_put_shost;
 
@@ -2582,8 +2823,6 @@ lpfc_stop_port_s4(struct lpfc_hba *phba)
        lpfc_stop_hba_timers(phba);
        phba->pport->work_port_events = 0;
        phba->sli4_hba.intr_enable = 0;
-       /* Hard clear it for now, shall have more graceful way to wait later */
-       phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 }
 
 /**
@@ -2635,7 +2874,7 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
        del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
        bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
        bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
-              phba->fcf.fcf_indx);
+              phba->fcf.current_rec.fcf_indx);
 
        if (!phba->sli4_hba.intr_enable)
                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -2659,6 +2898,117 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
+ * @phba: Pointer to hba for which this call is being executed.
+ *
+ * This routine starts the timer waiting for the FCF rediscovery to complete.
+ **/
+void
+lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
+{
+       unsigned long fcf_redisc_wait_tmo =
+               (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
+       /* Start fcf rediscovery wait period timer */
+       mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
+       spin_lock_irq(&phba->hbalock);
+       /* Allow action to new fcf asynchronous event */
+       phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+       /* Mark the FCF rediscovery pending state */
+       phba->fcf.fcf_flag |= FCF_REDISC_PEND;
+       spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
+ * @ptr: Map to lpfc_hba data structure pointer.
+ *
+ * This routine is invoked when waiting for FCF table rediscover has been
+ * timed out. If new FCF record(s) has (have) been discovered during the
+ * wait period, a new FCF event shall be added to the FCOE async event
+ * list, and then worker thread shall be woken up for processing from the
+ * worker thread context.
+ **/
+void
+lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
+{
+       struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+
+       /* Don't send FCF rediscovery event if timer cancelled */
+       spin_lock_irq(&phba->hbalock);
+       if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+               spin_unlock_irq(&phba->hbalock);
+               return;
+       }
+       /* Clear FCF rediscovery timer pending flag */
+       phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+       /* FCF rediscovery event to worker thread */
+       phba->fcf.fcf_flag |= FCF_REDISC_EVT;
+       spin_unlock_irq(&phba->hbalock);
+       lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                       "2776 FCF rediscover wait timer expired, post "
+                       "a worker thread event for FCF table scan\n");
+       /* wake up worker thread */
+       lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function uses the QUERY_FW_CFG mailbox command to determine if the
+ * firmware loaded supports FCoE. A return of zero indicates that the mailbox
+ * was successful and the firmware supports FCoE. Any other return indicates
+ * a error. It is assumed that this function will be called before interrupts
+ * are enabled.
+ **/
+static int
+lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
+{
+       int rc = 0;
+       LPFC_MBOXQ_t *mboxq;
+       struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
+       uint32_t length;
+       uint32_t shdr_status, shdr_add_status;
+
+       mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2621 Failed to allocate mbox for "
+                               "query firmware config cmd\n");
+               return -ENOMEM;
+       }
+       query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
+       length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_QUERY_FW_CFG,
+                        length, LPFC_SLI4_MBX_EMBED);
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr_status = bf_get(lpfc_mbox_hdr_status,
+                            &query_fw_cfg->header.cfg_shdr.response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+                                &query_fw_cfg->header.cfg_shdr.response);
+       if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2622 Query Firmware Config failed "
+                               "mbx status x%x, status x%x add_status x%x\n",
+                               rc, shdr_status, shdr_add_status);
+               return -EINVAL;
+       }
+       if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2623 FCoE Function not supported by firmware. "
+                               "Function mode = %08x\n",
+                               query_fw_cfg->function_mode);
+               return -EINVAL;
+       }
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mboxq, phba->mbox_mem_pool);
+       return 0;
+}
+
+/**
  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async link completion queue entry.
@@ -2791,6 +3141,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
        att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
        if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
                return;
+       phba->fcoe_eventtag = acqe_link->event_tag;
        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -2852,6 +3203,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
                                bf_get(lpfc_acqe_link_physical, acqe_link);
        phba->sli4_hba.link_state.fault =
                                bf_get(lpfc_acqe_link_fault, acqe_link);
+       phba->sli4_hba.link_state.logical_speed =
+                               bf_get(lpfc_acqe_qos_link_speed, acqe_link);
 
        /* Invoke the lpfc_handle_latt mailbox command callback function */
        lpfc_mbx_cmpl_read_la(phba, pmb);
@@ -2865,6 +3218,68 @@ out_free_pmb:
 }
 
 /**
+ * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
+ * @vport: pointer to vport data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on a vport in
+ * response to a CVL event.
+ *
+ * Return the pointer to the ndlp with the vport if successful, otherwise
+ * return NULL.
+ **/
+static struct lpfc_nodelist *
+lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
+{
+       struct lpfc_nodelist *ndlp;
+       struct Scsi_Host *shost;
+       struct lpfc_hba *phba;
+
+       if (!vport)
+               return NULL;
+       ndlp = lpfc_findnode_did(vport, Fabric_DID);
+       if (!ndlp)
+               return NULL;
+       phba = vport->phba;
+       if (!phba)
+               return NULL;
+       if (phba->pport->port_state <= LPFC_FLOGI)
+               return NULL;
+       /* If virtual link is not yet instantiated ignore CVL */
+       if (vport->port_state <= LPFC_FDISC)
+               return NULL;
+       shost = lpfc_shost_from_vport(vport);
+       if (!shost)
+               return NULL;
+       lpfc_linkdown_port(vport);
+       lpfc_cleanup_pending_mbox(vport);
+       spin_lock_irq(shost->host_lock);
+       vport->fc_flag |= FC_VPORT_CVL_RCVD;
+       spin_unlock_irq(shost->host_lock);
+
+       return ndlp;
+}
+
+/**
+ * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on all vports in
+ * response to a FCF dead event.
+ **/
+static void
+lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
+{
+       struct lpfc_vport **vports;
+       int i;
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports)
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+                       lpfc_sli4_perform_vport_cvl(vports[i]);
+       lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
  * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -2877,57 +3292,205 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 {
        uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
        int rc;
+       struct lpfc_vport *vport;
+       struct lpfc_nodelist *ndlp;
+       struct Scsi_Host  *shost;
+       int active_vlink_present;
+       struct lpfc_vport **vports;
+       int i;
 
+       phba->fc_eventTag = acqe_fcoe->event_tag;
+       phba->fcoe_eventtag = acqe_fcoe->event_tag;
        switch (event_type) {
        case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                       "2546 New FCF found index 0x%x tag 0x%x \n",
-                       acqe_fcoe->fcf_index,
-                       acqe_fcoe->event_tag);
-               /*
-                * If the current FCF is in discovered state,
-                * do nothing.
-                */
+       case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+                       "2546 New FCF found/FCF parameter modified event: "
+                       "evt_tag:x%x, fcf_index:x%x\n",
+                       acqe_fcoe->event_tag, acqe_fcoe->index);
+
                spin_lock_irq(&phba->hbalock);
-               if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
+               if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
+                   (phba->hba_flag & FCF_DISC_INPROGRESS)) {
+                       /*
+                        * If the current FCF is in discovered state or
+                        * FCF discovery is in progress, do nothing.
+                        */
+                       spin_unlock_irq(&phba->hbalock);
+                       break;
+               }
+
+               if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+                       /*
+                        * If fast FCF failover rescan event is pending,
+                        * do nothing.
+                        */
                        spin_unlock_irq(&phba->hbalock);
                        break;
                }
                spin_unlock_irq(&phba->hbalock);
 
-               /* Read the FCF table and re-discover SAN. */
-               rc = lpfc_sli4_read_fcf_record(phba,
-                       LPFC_FCOE_FCF_GET_FIRST);
+               if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
+                   !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+                       /*
+                        * During period of FCF discovery, read the FCF
+                        * table record indexed by the event to update
+                        * FCF round robin failover eligible FCF bmask.
+                        */
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+                                       LOG_DISCOVERY,
+                                       "2779 Read new FCF record with "
+                                       "fcf_index:x%x for updating FCF "
+                                       "round robin failover bmask\n",
+                                       acqe_fcoe->index);
+                       rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
+               }
+
+               /* Otherwise, scan the entire FCF table and re-discover SAN */
+               lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+                               "2770 Start FCF table scan due to new FCF "
+                               "event: evt_tag:x%x, fcf_index:x%x\n",
+                               acqe_fcoe->event_tag, acqe_fcoe->index);
+               rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+                                                    LPFC_FCOE_FCF_GET_FIRST);
                if (rc)
-                       lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                               "2547 Read FCF record failed 0x%x\n",
-                               rc);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+                                       "2547 Issue FCF scan read FCF mailbox "
+                                       "command failed 0x%x\n", rc);
                break;
 
        case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                       "2548 FCF Table full count 0x%x tag 0x%x \n",
+                       "2548 FCF Table full count 0x%x tag 0x%x\n",
                        bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
                        acqe_fcoe->event_tag);
                break;
 
        case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                       "2549 FCF disconnected fron network index 0x%x"
-                       " tag 0x%x \n", acqe_fcoe->fcf_index,
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+                       "2549 FCF disconnected from network index 0x%x"
+                       " tag 0x%x\n", acqe_fcoe->index,
                        acqe_fcoe->event_tag);
                /* If the event is not for currently used fcf do nothing */
-               if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
+               if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
                        break;
-               /*
-                * Currently, driver support only one FCF - so treat this as
-                * a link down.
+               /* We request port to rediscover the entire FCF table for
+                * a fast recovery from case that the current FCF record
+                * is no longer valid if we are not in the middle of FCF
+                * failover process already.
                 */
-               lpfc_linkdown(phba);
-               /* Unregister FCF if no devices connected to it */
-               lpfc_unregister_unused_fcf(phba);
+               spin_lock_irq(&phba->hbalock);
+               if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+                       spin_unlock_irq(&phba->hbalock);
+                       /* Update FLOGI FCF failover eligible FCF bmask */
+                       lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
+                       break;
+               }
+               /* Mark the fast failover process in progress */
+               phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+               spin_unlock_irq(&phba->hbalock);
+               lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+                               "2771 Start FCF fast failover process due to "
+                               "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
+                               "\n", acqe_fcoe->event_tag, acqe_fcoe->index);
+               rc = lpfc_sli4_redisc_fcf_table(phba);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+                                       LOG_DISCOVERY,
+                                       "2772 Issue FCF rediscover mailbox "
+                                       "command failed, fail through to FCF "
+                                       "dead event\n");
+                       spin_lock_irq(&phba->hbalock);
+                       phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+                       spin_unlock_irq(&phba->hbalock);
+                       /*
+                        * Last resort will fail over by treating this
+                        * as a link down to FCF registration.
+                        */
+                       lpfc_sli4_fcf_dead_failthrough(phba);
+               } else
+                       /* Handling fast FCF failover to a DEAD FCF event
+                        * is considered equivalent to receiving CVL to all
+                        * vports.
+                        */
+                       lpfc_sli4_perform_all_vport_cvl(phba);
                break;
+       case LPFC_FCOE_EVENT_TYPE_CVL:
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+                       "2718 Clear Virtual Link Received for VPI 0x%x"
+                       " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
+               vport = lpfc_find_vport_by_vpid(phba,
+                               acqe_fcoe->index - phba->vpi_base);
+               ndlp = lpfc_sli4_perform_vport_cvl(vport);
+               if (!ndlp)
+                       break;
+               active_vlink_present = 0;
+
+               vports = lpfc_create_vport_work_array(phba);
+               if (vports) {
+                       for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+                                       i++) {
+                               if ((!(vports[i]->fc_flag &
+                                       FC_VPORT_CVL_RCVD)) &&
+                                       (vports[i]->port_state > LPFC_FDISC)) {
+                                       active_vlink_present = 1;
+                                       break;
+                               }
+                       }
+                       lpfc_destroy_vport_work_array(phba, vports);
+               }
 
+               if (active_vlink_present) {
+                       /*
+                        * If there are other active VLinks present,
+                        * re-instantiate the Vlink using FDISC.
+                        */
+                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       shost = lpfc_shost_from_vport(vport);
+                       spin_lock_irq(shost->host_lock);
+                       ndlp->nlp_flag |= NLP_DELAY_TMO;
+                       spin_unlock_irq(shost->host_lock);
+                       ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+                       vport->port_state = LPFC_FDISC;
+               } else {
+                       /*
+                        * Otherwise, we request port to rediscover
+                        * the entire FCF table for a fast recovery
+                        * from possible case that the current FCF
+                        * is no longer valid if we are not already
+                        * in the FCF failover process.
+                        */
+                       spin_lock_irq(&phba->hbalock);
+                       if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+                               spin_unlock_irq(&phba->hbalock);
+                               break;
+                       }
+                       /* Mark the fast failover process in progress */
+                       phba->fcf.fcf_flag |= FCF_ACVL_DISC;
+                       spin_unlock_irq(&phba->hbalock);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+                                       LOG_DISCOVERY,
+                                       "2773 Start FCF fast failover due "
+                                       "to CVL event: evt_tag:x%x\n",
+                                       acqe_fcoe->event_tag);
+                       rc = lpfc_sli4_redisc_fcf_table(phba);
+                       if (rc) {
+                               lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+                                               LOG_DISCOVERY,
+                                               "2774 Issue FCF rediscover "
+                                               "mailbox command failed, "
+                                               "through to CVL event\n");
+                               spin_lock_irq(&phba->hbalock);
+                               phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+                               spin_unlock_irq(&phba->hbalock);
+                               /*
+                                * Last resort will be to retry on the
+                                * current registered FCF entry.
+                                */
+                               lpfc_retry_pport_discovery(phba);
+                       }
+               }
+               break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0288 Unknown FCoE event type 0x%x event tag "
@@ -2947,6 +3510,7 @@ static void
 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
                         struct lpfc_acqe_dcbx *acqe_dcbx)
 {
+       phba->fc_eventTag = acqe_dcbx->event_tag;
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0290 The SLI4 DCBX asynchronous event is not "
                        "handled yet\n");
@@ -3001,6 +3565,37 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process FCF table
+ * rediscovery pending completion event.
+ **/
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
+{
+       int rc;
+
+       spin_lock_irq(&phba->hbalock);
+       /* Clear FCF rediscovery timeout event */
+       phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
+       /* Clear driver fast failover FCF record flag */
+       phba->fcf.failover_rec.flag = 0;
+       /* Set state for FCF fast failover */
+       phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+       spin_unlock_irq(&phba->hbalock);
+
+       /* Scan FCF table from the first entry to re-discover SAN */
+       lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+                       "2777 Start FCF table scan after FCF "
+                       "rediscovery quiescent period over\n");
+       rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+       if (rc)
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+                               "2747 Issue FCF scan read FCF mailbox "
+                               "command failed 0x%x\n", rc);
+}
+
+/**
  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
  * @phba: pointer to lpfc hba data structure.
  * @dev_grp: The HBA PCI-Device group number.
@@ -3081,7 +3676,7 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
  * PCI devices.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static int
@@ -3177,7 +3772,7 @@ lpfc_reset_hba(struct lpfc_hba *phba)
  * support the SLI-3 HBA device it attached to.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static int
@@ -3278,15 +3873,18 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
  * support the SLI-4 HBA device it attached to.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static int
 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
        struct lpfc_sli *psli;
-       int rc;
-       int i, hbq_count;
+       LPFC_MBOXQ_t *mboxq;
+       int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
+       uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
+       struct lpfc_mqe *mqe;
+       int longs;
 
        /* Before proceed, wait for POST done and device ready */
        rc = lpfc_sli4_post_status_check(phba);
@@ -3315,6 +3913,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        init_timer(&phba->eratt_poll);
        phba->eratt_poll.function = lpfc_poll_eratt;
        phba->eratt_poll.data = (unsigned long) phba;
+       /* FCF rediscover timer */
+       init_timer(&phba->fcf.redisc_wait);
+       phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
+       phba->fcf.redisc_wait.data = (unsigned long)phba;
+
        /*
         * We need to do a READ_CONFIG mailbox command here before
         * calling lpfc_get_cfgparam. For VFs this will report the
@@ -3339,31 +3942,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
         * used to create the sg_dma_buf_pool must be dynamically calculated.
         * 2 segments are added since the IOCB needs a command and response bde.
         * To insure that the scsi sgl does not cross a 4k page boundary only
-        * sgl sizes of 1k, 2k, 4k, and 8k are supported.
-        * Table of sgl sizes and seg_cnt:
-        * sgl size,    sg_seg_cnt      total seg
-        * 1k           50              52
-        * 2k           114             116
-        * 4k           242             244
-        * 8k           498             500
-        * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
-        * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
-        * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
-        * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
+        * sgl sizes of must be a power of 2.
         */
-       if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
-               phba->cfg_sg_seg_cnt = 50;
-       else if (phba->cfg_sg_seg_cnt <= 114)
-               phba->cfg_sg_seg_cnt = 114;
-       else if (phba->cfg_sg_seg_cnt <= 242)
-               phba->cfg_sg_seg_cnt = 242;
+       buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
+                   ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
+       /* Feature Level 1 hardware is limited to 2 pages */
+       if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
+            LPFC_SLI_INTF_FEATURELEVEL1_1))
+               max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
        else
-               phba->cfg_sg_seg_cnt = 498;
-
-       phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
-                                       + sizeof(struct fcp_rsp);
-       phba->cfg_sg_dma_buf_size +=
-               ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+               max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
+       for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
+            dma_buf_size < max_buf_size && buf_size > dma_buf_size;
+            dma_buf_size = dma_buf_size << 1)
+               ;
+       if (dma_buf_size == max_buf_size)
+               phba->cfg_sg_seg_cnt = (dma_buf_size -
+                       sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
+                       (2 * sizeof(struct sli4_sge))) /
+                               sizeof(struct sli4_sge);
+       phba->cfg_sg_dma_buf_size = dma_buf_size;
 
        /* Initialize buffer queue management fields */
        hbq_count = lpfc_sli_hbq_count();
@@ -3389,7 +3987,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        /* Driver internel slow-path CQ Event pool */
        INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
        /* Response IOCB work queue list */
-       INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
+       INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
        /* Asynchronous event CQ Event work queue list */
        INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
        /* Fast-path XRI aborted CQ Event work queue list */
@@ -3418,6 +4016,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        if (unlikely(rc))
                goto out_free_bsmbx;
 
+       rc = lpfc_sli4_fw_cfg_check(phba);
+       if (unlikely(rc))
+               goto out_free_bsmbx;
+
        /* Set up the hba's configuration parameters. */
        rc = lpfc_sli4_read_config(phba);
        if (unlikely(rc))
@@ -3459,13 +4061,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_free_active_sgl;
        }
 
+       /* Allocate eligible FCF bmask memory for FCF round robin failover */
+       longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
+       phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+                                        GFP_KERNEL);
+       if (!phba->fcf.fcf_rr_bmask) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2759 Failed allocate memory for FCF round "
+                               "robin failover bmask\n");
+               goto out_remove_rpi_hdrs;
+       }
+
        phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
                                    phba->cfg_fcp_eq_count), GFP_KERNEL);
        if (!phba->sli4_hba.fcp_eq_hdl) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2572 Failed allocate memory for fast-path "
                                "per-EQ handle array\n");
-               goto out_remove_rpi_hdrs;
+               goto out_free_fcf_rr_bmask;
        }
 
        phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -3477,10 +4090,49 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_free_fcp_eq_hdl;
        }
 
+       mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+                                                      GFP_KERNEL);
+       if (!mboxq) {
+               rc = -ENOMEM;
+               goto out_free_fcp_eq_hdl;
+       }
+
+       /* Get the Supported Pages. It is always available. */
+       lpfc_supported_pages(mboxq);
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       if (unlikely(rc)) {
+               rc = -EIO;
+               mempool_free(mboxq, phba->mbox_mem_pool);
+               goto out_free_fcp_eq_hdl;
+       }
+
+       mqe = &mboxq->u.mqe;
+       memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+              LPFC_MAX_SUPPORTED_PAGES);
+       for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+               switch (pn_page[i]) {
+               case LPFC_SLI4_PARAMETERS:
+                       phba->sli4_hba.pc_sli4_params.supported = 1;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       /* Read the port's SLI4 Parameters capabilities if supported. */
+       if (phba->sli4_hba.pc_sli4_params.supported)
+               rc = lpfc_pc_sli4_params_get(phba, mboxq);
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       if (rc) {
+               rc = -EIO;
+               goto out_free_fcp_eq_hdl;
+       }
        return rc;
 
 out_free_fcp_eq_hdl:
        kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_fcf_rr_bmask:
+       kfree(phba->fcf.fcf_rr_bmask);
 out_remove_rpi_hdrs:
        lpfc_sli4_remove_rpi_hdrs(phba);
 out_free_active_sgl:
@@ -3524,6 +4176,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 
        /* Free the allocated rpi headers. */
        lpfc_sli4_remove_rpi_hdrs(phba);
+       lpfc_sli4_remove_rpis(phba);
+
+       /* Free eligible FCF index bmask */
+       kfree(phba->fcf.fcf_rr_bmask);
 
        /* Free the ELS sgl list */
        lpfc_free_active_sgl(phba);
@@ -3550,8 +4206,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 
        /* Free the current connect table */
        list_for_each_entry_safe(conn_entry, next_conn_entry,
-               &phba->fcf_conn_rec_list, list)
+               &phba->fcf_conn_rec_list, list) {
+               list_del_init(&conn_entry->list);
                kfree(conn_entry);
+       }
 
        return;
 }
@@ -3569,6 +4227,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 int
 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 {
+       phba->lpfc_hba_init_link = lpfc_hba_init_link;
+       phba->lpfc_hba_down_link = lpfc_hba_down_link;
        switch (dev_grp) {
        case LPFC_PCI_DEV_LP:
                phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@@ -3598,7 +4258,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
  * device specific resource setup to support the HBA device it attached to.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static int
@@ -3644,7 +4304,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
  * device specific resource setup to support the HBA device it attached to.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static int
@@ -3709,7 +4369,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
  * list and set up the IOCB tag array accordingly.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static int
@@ -3780,7 +4440,7 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
        rc = lpfc_sli4_remove_all_sgl_pages(phba);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                       "2005 Unable to deregister pages from HBA: %x", rc);
+                       "2005 Unable to deregister pages from HBA: %x\n", rc);
        }
        kfree(phba->sli4_hba.lpfc_els_sgl_array);
 }
@@ -3828,7 +4488,7 @@ lpfc_free_active_sgl(struct lpfc_hba *phba)
  * list and set up the sgl xritag tag array accordingly.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static int
@@ -3916,6 +4576,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
 
                /* The list order is used by later block SGL registraton */
                spin_lock_irq(&phba->hbalock);
+               sglq_entry->state = SGL_FREED;
                list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
                phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
                phba->sli4_hba.total_sglq_bufs++;
@@ -3942,7 +4603,7 @@ out_free_mem:
  * enabled and the driver is reinitializing the device.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     ENOMEM - No availble memory
  *      EIO - The mailbox failed to complete successfully.
  **/
@@ -4102,7 +4763,7 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
  * PCI device data structure is set.
  *
  * Return codes
- *      pointer to @phba - sucessful
+ *      pointer to @phba - successful
  *      NULL - error
  **/
 static struct lpfc_hba *
@@ -4113,8 +4774,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
        /* Allocate memory for HBA structure */
        phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
        if (!phba) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "1417 Failed to allocate hba struct.\n");
+               dev_err(&pdev->dev, "failed to allocate hba struct\n");
                return NULL;
        }
 
@@ -4128,6 +4788,9 @@ lpfc_hba_alloc(struct pci_dev *pdev)
                return NULL;
        }
 
+       spin_lock_init(&phba->ct_ev_lock);
+       INIT_LIST_HEAD(&phba->ct_ev_waiters);
+
        return phba;
 }
 
@@ -4156,7 +4819,7 @@ lpfc_hba_free(struct lpfc_hba *phba)
  * host with it.
  *
  * Return codes
- *      0 - sucessful
+ *      0 - successful
  *      other values - error
  **/
 static int
@@ -4227,7 +4890,8 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
                        _dump_buf_data =
                                (char *) __get_free_pages(GFP_KERNEL, pagecnt);
                        if (_dump_buf_data) {
-                               printk(KERN_ERR "BLKGRD allocated %d pages for "
+                               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+                                       "9043 BLKGRD: allocated %d pages for "
                                       "_dump_buf_data at 0x%p\n",
                                       (1 << pagecnt), _dump_buf_data);
                                _dump_buf_data_order = pagecnt;
@@ -4238,17 +4902,20 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
                                --pagecnt;
                }
                if (!_dump_buf_data_order)
-                       printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+                               "9044 BLKGRD: ERROR unable to allocate "
                               "memory for hexdump\n");
        } else
-               printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
+               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+                       "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
                       "\n", _dump_buf_data);
        if (!_dump_buf_dif) {
                while (pagecnt) {
                        _dump_buf_dif =
                                (char *) __get_free_pages(GFP_KERNEL, pagecnt);
                        if (_dump_buf_dif) {
-                               printk(KERN_ERR "BLKGRD allocated %d pages for "
+                               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+                                       "9046 BLKGRD: allocated %d pages for "
                                       "_dump_buf_dif at 0x%p\n",
                                       (1 << pagecnt), _dump_buf_dif);
                                _dump_buf_dif_order = pagecnt;
@@ -4259,10 +4926,12 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
                                --pagecnt;
                }
                if (!_dump_buf_dif_order)
-                       printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+                       "9047 BLKGRD: ERROR unable to allocate "
                               "memory for hexdump\n");
        } else
-               printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
+               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+                       "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
                       _dump_buf_dif);
 }
 
@@ -4319,7 +4988,7 @@ lpfc_post_init_setup(struct lpfc_hba *phba)
  * with SLI-3 interface spec.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static int
@@ -4338,9 +5007,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
                pdev = phba->pcidev;
 
        /* Set the device DMA mask size */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
-               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+        || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
+               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+                || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
                        return error;
+               }
+       }
 
        /* Get the bus address of Bar0 and Bar2 and the number of bytes
         * required by each mapping.
@@ -4465,30 +5138,12 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
 int
 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 {
-       struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
-       uint32_t onlnreg0, onlnreg1;
+       struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
        int i, port_error = -ENODEV;
 
        if (!phba->sli4_hba.STAregaddr)
                return -ENODEV;
 
-       /* With uncoverable error, log the error message and return error */
-       onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
-       onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
-       if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
-               uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
-               uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
-               if (uerrlo_reg.word0 || uerrhi_reg.word0) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "1422 HBA Unrecoverable error: "
-                                       "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
-                                       "online0_reg=0x%x, online1_reg=0x%x\n",
-                                       uerrlo_reg.word0, uerrhi_reg.word0,
-                                       onlnreg0, onlnreg1);
-               }
-               return -ENODEV;
-       }
-
        /* Wait up to 30 seconds for the SLI Port POST done and ready */
        for (i = 0; i < 3000; i++) {
                sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
@@ -4519,14 +5174,37 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
                        bf_get(lpfc_hst_state_port_status, &sta_reg));
 
        /* Log device information */
-       scratchpad.word0 =  readl(phba->sli4_hba.SCRATCHPADregaddr);
-       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                       "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
-                       "FeatureL1=0x%x, FeatureL2=0x%x\n",
-                       bf_get(lpfc_scratchpad_chiptype, &scratchpad),
-                       bf_get(lpfc_scratchpad_slirev, &scratchpad),
-                       bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
-                       bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
+       phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
+       if (bf_get(lpfc_sli_intf_valid,
+                  &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
+                               "FeatureL1=0x%x, FeatureL2=0x%x\n",
+                               bf_get(lpfc_sli_intf_sli_family,
+                                      &phba->sli4_hba.sli_intf),
+                               bf_get(lpfc_sli_intf_slirev,
+                                      &phba->sli4_hba.sli_intf),
+                               bf_get(lpfc_sli_intf_featurelevel1,
+                                      &phba->sli4_hba.sli_intf),
+                               bf_get(lpfc_sli_intf_featurelevel2,
+                                      &phba->sli4_hba.sli_intf));
+       }
+       phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
+       phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
+       /* With unrecoverable error, log the error message and return error */
+       uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
+       uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
+       if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
+           (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1422 HBA Unrecoverable error: "
+                               "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+                               "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
+                               uerrlo_reg.word0, uerrhi_reg.word0,
+                               phba->sli4_hba.ue_mask_lo,
+                               phba->sli4_hba.ue_mask_hi);
+               return -ENODEV;
+       }
 
        return port_error;
 }
@@ -4545,12 +5223,12 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
                                        LPFC_UERR_STATUS_LO;
        phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
                                        LPFC_UERR_STATUS_HI;
-       phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
-                                       LPFC_ONLINE0;
-       phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
-                                       LPFC_ONLINE1;
-       phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
-                                       LPFC_SCRATCHPAD;
+       phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
+                                       LPFC_UE_MASK_LO;
+       phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
+                                       LPFC_UE_MASK_HI;
+       phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
+                                       LPFC_SLI_INTF;
 }
 
 /**
@@ -4616,7 +5294,7 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
  * this routine.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     ENOMEM - could not allocated memory.
  **/
 static int
@@ -4715,7 +5393,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
  * allocation for the port.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     ENOMEM - No availble memory
  *      EIO - The mailbox failed to complete successfully.
  **/
@@ -4779,7 +5457,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
                phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
                phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
-               phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
+               phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
+                               (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
                phba->max_vports = phba->max_vpi;
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "2003 cfg params XRI(B:%d M:%d), "
@@ -4815,7 +5494,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
  * HBA consistent with the SLI-4 interface spec.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     ENOMEM - No availble memory
  *      EIO - The mailbox failed to complete successfully.
  **/
@@ -4864,7 +5543,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
  * we just use some constant number as place holder.
  *
  * Return codes
- *      0 - sucessful
+ *      0 - successful
  *      ENOMEM - No availble memory
  *      EIO - The mailbox failed to complete successfully.
  **/
@@ -4933,10 +5612,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        /* It does not make sense to have more EQs than WQs */
        if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                               "2593 The number of FCP EQs (%d) is more "
-                               "than the number of FCP WQs (%d), take "
-                               "the number of FCP EQs same as than of "
-                               "WQs (%d)\n", cfg_fcp_eq_count,
+                               "2593 The FCP EQ count(%d) cannot be greater "
+                               "than the FCP WQ count(%d), limiting the "
+                               "FCP EQ count to %d\n", cfg_fcp_eq_count,
                                phba->cfg_fcp_wq_count,
                                phba->cfg_fcp_wq_count);
                cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
@@ -5012,15 +5690,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        }
        phba->sli4_hba.els_cq = qdesc;
 
-       /* Create slow-path Unsolicited Receive Complete Queue */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-                                     phba->sli4_hba.cq_ecount);
-       if (!qdesc) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0502 Failed allocate slow-path USOL RX CQ\n");
-               goto out_free_els_cq;
-       }
-       phba->sli4_hba.rxq_cq = qdesc;
 
        /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
        phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
@@ -5029,7 +5698,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2577 Failed allocate memory for fast-path "
                                "CQ record array\n");
-               goto out_free_rxq_cq;
+               goto out_free_els_cq;
        }
        for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
                qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
@@ -5142,9 +5811,6 @@ out_free_fcp_cq:
                phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
        }
        kfree(phba->sli4_hba.fcp_cq);
-out_free_rxq_cq:
-       lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-       phba->sli4_hba.rxq_cq = NULL;
 out_free_els_cq:
        lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
        phba->sli4_hba.els_cq = NULL;
@@ -5172,7 +5838,7 @@ out_error:
  * operation.
  *
  * Return codes
- *      0 - sucessful
+ *      0 - successful
  *      ENOMEM - No availble memory
  *      EIO - The mailbox failed to complete successfully.
  **/
@@ -5201,10 +5867,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
        lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
        phba->sli4_hba.dat_rq = NULL;
 
-       /* Release unsolicited receive complete queue */
-       lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-       phba->sli4_hba.rxq_cq = NULL;
-
        /* Release ELS complete queue */
        lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
        phba->sli4_hba.els_cq = NULL;
@@ -5240,7 +5902,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
  * operation.
  *
  * Return codes
- *      0 - sucessful
+ *      0 - successful
  *      ENOMEM - No availble memory
  *      EIO - The mailbox failed to complete successfully.
  **/
@@ -5337,25 +5999,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.els_cq->queue_id,
                        phba->sli4_hba.sp_eq->queue_id);
 
-       /* Set up slow-path Unsolicited Receive Complete Queue */
-       if (!phba->sli4_hba.rxq_cq) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0532 USOL RX CQ not allocated\n");
-               goto out_destroy_els_cq;
-       }
-       rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
-                           LPFC_RCQ, LPFC_USOL);
-       if (rc) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0533 Failed setup of slow-path USOL RX CQ: "
-                               "rc = 0x%x\n", rc);
-               goto out_destroy_els_cq;
-       }
-       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                       "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
-                       phba->sli4_hba.rxq_cq->queue_id,
-                       phba->sli4_hba.sp_eq->queue_id);
-
        /* Set up fast-path FCP Response Complete Queue */
        for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
                if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -5461,7 +6104,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                goto out_destroy_fcp_wq;
        }
        rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
-                           phba->sli4_hba.rxq_cq, LPFC_USOL);
+                           phba->sli4_hba.els_cq, LPFC_USOL);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0541 Failed setup of Receive Queue: "
@@ -5473,7 +6116,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        "parent cq-id=%d\n",
                        phba->sli4_hba.hdr_rq->queue_id,
                        phba->sli4_hba.dat_rq->queue_id,
-                       phba->sli4_hba.rxq_cq->queue_id);
+                       phba->sli4_hba.els_cq->queue_id);
        return 0;
 
 out_destroy_fcp_wq:
@@ -5485,8 +6128,6 @@ out_destroy_mbx_wq:
 out_destroy_fcp_cq:
        for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
                lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
-       lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
-out_destroy_els_cq:
        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
        lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -5506,7 +6147,7 @@ out_error:
  * operation.
  *
  * Return codes
- *      0 - sucessful
+ *      0 - successful
  *      ENOMEM - No availble memory
  *      EIO - The mailbox failed to complete successfully.
  **/
@@ -5528,8 +6169,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
        lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
        /* Unset ELS complete queue */
        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
-       /* Unset unsolicited receive complete queue */
-       lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
        /* Unset FCP response complete queue */
        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
                lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
@@ -5553,7 +6192,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
  * Later, this can be used for all the slow-path events.
  *
  * Return codes
- *      0 - sucessful
+ *      0 - successful
  *      -ENOMEM - No availble memory
  **/
 static int
@@ -5714,7 +6353,7 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
  * all resources assigned to the PCI function which originates this request.
  *
  * Return codes
- *      0 - sucessful
+ *      0 - successful
  *      ENOMEM - No availble memory
  *      EIO - The mailbox failed to complete successfully.
  **/
@@ -5864,7 +6503,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
                spin_lock_irqsave(&phba->hbalock, flags);
                /* Mark the FCFI is no longer registered */
                phba->fcf.fcf_flag &=
-                       ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
+                       ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
                spin_unlock_irqrestore(&phba->hbalock, flags);
        }
 }
@@ -5877,7 +6516,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
  * with SLI-4 interface spec.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static int
@@ -5894,22 +6533,30 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                pdev = phba->pcidev;
 
        /* Set the device DMA mask size */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
-               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+        || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
+               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+                || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
                        return error;
+               }
+       }
 
        /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
         * number of bytes required by each mapping. They are actually
-        * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device.
+        * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
         */
-       phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
-       bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
-
-       phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
-       bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
+       if (pci_resource_start(pdev, 0)) {
+               phba->pci_bar0_map = pci_resource_start(pdev, 0);
+               bar0map_len = pci_resource_len(pdev, 0);
+       } else {
+               phba->pci_bar0_map = pci_resource_start(pdev, 1);
+               bar0map_len = pci_resource_len(pdev, 1);
+       }
+       phba->pci_bar1_map = pci_resource_start(pdev, 2);
+       bar1map_len = pci_resource_len(pdev, 2);
 
-       phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
-       bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
+       phba->pci_bar2_map = pci_resource_start(pdev, 4);
+       bar2map_len = pci_resource_len(pdev, 4);
 
        /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
        phba->sli4_hba.conf_regs_memmap_p =
@@ -6006,7 +6653,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
  * will be left with MSI-X enabled and leaks its vectors.
  *
  * Return codes
- *   0 - sucessful
+ *   0 - successful
  *   other values - error
  **/
 static int
@@ -6138,7 +6785,7 @@ lpfc_sli_disable_msix(struct lpfc_hba *phba)
  * is done in this function.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  */
 static int
@@ -6197,7 +6844,7 @@ lpfc_sli_disable_msi(struct lpfc_hba *phba)
  * MSI-X -> MSI -> IRQ.
  *
  * Return codes
- *   0 - sucessful
+ *   0 - successful
  *   other values - error
  **/
 static uint32_t
@@ -6287,7 +6934,7 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
  * enabled and leaks its vectors.
  *
  * Return codes
- * 0 - sucessful
+ * 0 - successful
  * other values - error
  **/
 static int
@@ -6397,7 +7044,7 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
  * which is done in this function.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static int
@@ -6462,7 +7109,7 @@ lpfc_sli4_disable_msi(struct lpfc_hba *phba)
  * MSI-X -> MSI -> IRQ.
  *
  * Return codes
- *     0 - sucessful
+ *     0 - successful
  *     other values - error
  **/
 static uint32_t
@@ -6654,6 +7301,73 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        phba->pport->work_port_events = 0;
 }
 
+/**
+ * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion.  The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+       int rc;
+       struct lpfc_mqe *mqe;
+       struct lpfc_pc_sli4_params *sli4_params;
+       uint32_t mbox_tmo;
+
+       rc = 0;
+       mqe = &mboxq->u.mqe;
+
+       /* Read the port's SLI4 Parameters port capabilities */
+       lpfc_sli4_params(mboxq);
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       else {
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
+               rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+       }
+
+       if (unlikely(rc))
+               return 1;
+
+       sli4_params = &phba->sli4_hba.pc_sli4_params;
+       sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
+       sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
+       sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
+       sli4_params->featurelevel_1 = bf_get(featurelevel_1,
+                                            &mqe->un.sli4_params);
+       sli4_params->featurelevel_2 = bf_get(featurelevel_2,
+                                            &mqe->un.sli4_params);
+       sli4_params->proto_types = mqe->un.sli4_params.word3;
+       sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
+       sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
+       sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
+       sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
+       sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
+       sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
+       sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
+       sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
+       sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
+       sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
+       sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
+       sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
+       sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
+       sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
+       sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
+       sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
+       sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
+       sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
+       sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
+       sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+       return rc;
+}
+
 /**
  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
  * @pdev: pointer to PCI device
@@ -6676,6 +7390,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
        struct lpfc_hba   *phba;
        struct lpfc_vport *vport = NULL;
+       struct Scsi_Host  *shost = NULL;
        int error;
        uint32_t cfg_mode, intr_mode;
 
@@ -6754,6 +7469,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
                goto out_destroy_shost;
        }
 
+       shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
        /* Now, trying to enable interrupt and bring up the device */
        cfg_mode = phba->cfg_use_msi;
        while (true) {
@@ -6820,6 +7536,8 @@ out_unset_pci_mem_s3:
        lpfc_sli_pci_mem_unset(phba);
 out_disable_pci_dev:
        lpfc_disable_pci_dev(phba);
+       if (shost)
+               scsi_host_put(shost);
 out_free_phba:
        lpfc_hba_free(phba);
        return error;
@@ -6990,6 +7708,13 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
        /* Restore device state from PCI config space */
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
+
+       /*
+        * pci_restore_state() clears the device's saved_state flag, so the
+        * just-restored state must be saved again for any later restore.
+        */
+       pci_save_state(pdev);
+
        if (pdev->is_busmaster)
                pci_set_master(pdev);
 
@@ -7024,6 +7749,73 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 }
 
 /**
+ * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot recover. It
+ * aborts and stops all the on-going I/Os on the pci device.
+ **/
+static void
+lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
+{
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2723 PCI channel I/O abort preparing for recovery\n");
+       /* Prepare for bringing HBA offline */
+       lpfc_offline_prep(phba);
+       /* Clear sli active flag to prevent sysfs access to HBA */
+       spin_lock_irq(&phba->hbalock);
+       phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
+       spin_unlock_irq(&phba->hbalock);
+       /* Stop and flush all I/Os and bring HBA offline */
+       lpfc_offline(phba);
+}
+
+/**
+ * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot reset. It
+ * disables the device interrupt and pci device, and aborts the internal FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_sli_ring  *pring;
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2710 PCI channel disable preparing for reset\n");
+       /* Disable interrupt and pci device */
+       lpfc_sli_disable_intr(phba);
+       pci_disable_device(phba->pcidev);
+       /*
+        * There may be I/Os dropped by the firmware.
+        * Error iocb (I/O) on txcmplq and let the SCSI layer
+        * retry it after re-establishing link.
+        */
+       pring = &psli->ring[psli->fcp_ring];
+       lpfc_sli_abort_iocb_ring(phba, pring);
+}
+
+/**
+ * lpfc_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for permanently disabling
+ * the PCI slot. It flushes all outstanding FCP I/Os; SCSI transport layer
+ * traffic has already been blocked by the caller (lpfc_scsi_dev_block).
+ **/
+static void
+lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+{
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2711 PCI channel permanent disable for failure\n");
+       /* Clean up all driver's outstanding SCSI I/Os */
+       lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
  * @pdev: pointer to PCI device.
  * @state: the current PCI connection state.
@@ -7037,6 +7829,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
  * as desired.
  *
  * Return codes
+ *     PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
  *     PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
  *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  **/
@@ -7045,33 +7838,30 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-       struct lpfc_sli *psli = &phba->sli;
-       struct lpfc_sli_ring  *pring;
 
-       if (state == pci_channel_io_perm_failure) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0472 PCI channel I/O permanent failure\n");
-               /* Block all SCSI devices' I/Os on the host */
-               lpfc_scsi_dev_block(phba);
-               /* Clean up all driver's outstanding SCSI I/Os */
-               lpfc_sli_flush_fcp_rings(phba);
+       /* Block all SCSI devices' I/Os on the host */
+       lpfc_scsi_dev_block(phba);
+
+       switch (state) {
+       case pci_channel_io_normal:
+               /* Non-fatal error, prepare for recovery */
+               lpfc_sli_prep_dev_for_recover(phba);
+               return PCI_ERS_RESULT_CAN_RECOVER;
+       case pci_channel_io_frozen:
+               /* Fatal error, prepare for slot reset */
+               lpfc_sli_prep_dev_for_reset(phba);
+               return PCI_ERS_RESULT_NEED_RESET;
+       case pci_channel_io_perm_failure:
+               /* Permanent failure, prepare for device down */
+               lpfc_prep_dev_for_perm_failure(phba);
                return PCI_ERS_RESULT_DISCONNECT;
+       default:
+               /* Unknown state, prepare and request slot reset */
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0472 Unknown PCI error state: x%x\n", state);
+               lpfc_sli_prep_dev_for_reset(phba);
+               return PCI_ERS_RESULT_NEED_RESET;
        }
-
-       pci_disable_device(pdev);
-       /*
-        * There may be I/Os dropped by the firmware.
-        * Error iocb (I/O) on txcmplq and let the SCSI layer
-        * retry it after re-establishing link.
-        */
-       pring = &psli->ring[psli->fcp_ring];
-       lpfc_sli_abort_iocb_ring(phba, pring);
-
-       /* Disable interrupt */
-       lpfc_sli_disable_intr(phba);
-
-       /* Request a slot reset. */
-       return PCI_ERS_RESULT_NEED_RESET;
 }
 
 /**
@@ -7108,6 +7898,13 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
        }
 
        pci_restore_state(pdev);
+
+       /*
+        * pci_restore_state() clears the device's saved_state flag, so the
+        * just-restored state must be saved again for any later restore.
+        */
+       pci_save_state(pdev);
+
        if (pdev->is_busmaster)
                pci_set_master(pdev);
 
@@ -7151,7 +7948,12 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
+       /* Bring the device online */
        lpfc_online(phba);
+
+       /* Clean up Advanced Error Reporting (AER) if needed */
+       if (phba->hba_flag & HBA_AER_ENABLED)
+               pci_cleanup_aer_uncorrect_error_status(pdev);
 }
 
 /**
@@ -7165,16 +7967,19 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
 {
        int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
 
-       if (max_xri <= 100)
-               return 4;
-       else if (max_xri <= 256)
-               return 8;
-       else if (max_xri <= 512)
-               return 16;
-       else if (max_xri <= 1024)
-               return 32;
-       else
-               return 48;
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               if (max_xri <= 100)
+                       return 10;
+               else if (max_xri <= 256)
+                       return 25;
+               else if (max_xri <= 512)
+                       return 50;
+               else if (max_xri <= 1024)
+                       return 100;
+               else
+                       return 150;
+       } else
+               return 0;
 }
 
 /**
@@ -7200,6 +8005,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
        struct lpfc_hba   *phba;
        struct lpfc_vport *vport = NULL;
+       struct Scsi_Host  *shost = NULL;
        int error;
        uint32_t cfg_mode, intr_mode;
        int mcnt;
@@ -7280,6 +8086,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
                goto out_destroy_shost;
        }
 
+       shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
        /* Now, trying to enable interrupt and bring up the device */
        cfg_mode = phba->cfg_use_msi;
        while (true) {
@@ -7293,6 +8100,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
                        error = -ENODEV;
                        goto out_free_sysfs_attr;
                }
+               /* Default to single FCP EQ for non-MSI-X */
+               if (phba->intr_type != MSIX)
+                       phba->cfg_fcp_eq_count = 1;
                /* Set up SLI-4 HBA */
                if (lpfc_sli4_hba_setup(phba)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7327,6 +8137,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        /* Perform post initialization setup */
        lpfc_post_init_setup(phba);
 
+       /* Check if there are static vports to be created. */
+       lpfc_create_static_vport(phba);
+
        return 0;
 
 out_disable_intr:
@@ -7345,6 +8158,8 @@ out_unset_pci_mem_s4:
        lpfc_sli4_pci_mem_unset(phba);
 out_disable_pci_dev:
        lpfc_disable_pci_dev(phba);
+       if (shost)
+               scsi_host_put(shost);
 out_free_phba:
        lpfc_hba_free(phba);
        return error;
@@ -7499,6 +8314,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
        /* Restore device state from PCI config space */
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
+
+       /*
+        * pci_restore_state() clears the device's saved_state flag, so the
+        * just-restored state must be saved again for any later restore.
+        */
+       pci_save_state(pdev);
+
        if (pdev->is_busmaster)
                pci_set_master(pdev);
 
@@ -7616,20 +8438,17 @@ static int __devinit
 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
        int rc;
-       uint16_t dev_id;
+       struct lpfc_sli_intf intf;
 
-       if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
+       if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
                return -ENODEV;
 
-       switch (dev_id) {
-       case PCI_DEVICE_ID_TIGERSHARK:
-       case PCI_DEVICE_ID_TIGERSHARK_S:
+       if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
+           (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
                rc = lpfc_pci_probe_one_s4(pdev, pid);
-               break;
-       default:
+       else
                rc = lpfc_pci_probe_one_s3(pdev, pid);
-               break;
-       }
+
        return rc;
 }
 
@@ -7922,7 +8741,9 @@ static struct pci_device_id lpfc_id_table[] = {
                PCI_ANY_ID, PCI_ANY_ID, },
        {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
                PCI_ANY_ID, PCI_ANY_ID, },
-       {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
+       {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
                PCI_ANY_ID, PCI_ANY_ID, },
        { 0 }
 };
@@ -8006,15 +8827,15 @@ lpfc_exit(void)
        if (lpfc_enable_npiv)
                fc_release_transport(lpfc_vport_transport_template);
        if (_dump_buf_data) {
-               printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
-                               "at 0x%p\n",
+               printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
+                               "_dump_buf_data at 0x%p\n",
                                (1L << _dump_buf_data_order), _dump_buf_data);
                free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
        }
 
        if (_dump_buf_dif) {
-               printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
-                               "at 0x%p\n",
+               printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
+                               "_dump_buf_dif at 0x%p\n",
                                (1L << _dump_buf_dif_order), _dump_buf_dif);
                free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
        }