include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 88e02a4..774663e 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -29,6 +29,7 @@
 #include <linux/spinlock.h>
 #include <linux/ctype.h>
 #include <linux/aer.h>
+#include <linux/slab.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -2199,8 +2200,10 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
 void
 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
 {
-       /* Clear pending FCF rediscovery wait timer */
-       phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+       /* Clear pending FCF rediscovery wait and failover in progress flags */
+       phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
+                               FCF_DEAD_DISC |
+                               FCF_ACVL_DISC);
        /* Now, try to stop the timer */
        del_timer(&phba->fcf.redisc_wait);
 }
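
For context on how this helper is meant to be called: by the usual lpfc locking convention, the double-underscore variant assumes the caller already holds phba->hbalock, so a locked wrapper would look roughly like the sketch below (a sketch of the calling convention, not necessarily the driver's exact wrapper).

/* Sketch of a locked wrapper; assumes the __ variant requires hbalock held. */
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	spin_unlock_irq(&phba->hbalock);
}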
@@ -2941,6 +2944,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
        /* FCF rediscovery event to worker thread */
        phba->fcf.fcf_flag |= FCF_REDISC_EVT;
        spin_unlock_irq(&phba->hbalock);
+       lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                       "2776 FCF rediscover wait timer expired, post "
+                       "a worker thread event for FCF table scan\n");
        /* wake up worker thread */
        lpfc_worker_wake_up(phba);
 }
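
The timer handler itself only sets FCF_REDISC_EVT and wakes the worker thread; the actual FCF table scan runs later in worker context via lpfc_sli4_fcf_redisc_event_proc() (see the hunk near the end of this file). A hedged sketch of the worker-side dispatch this relies on, assuming the worker loop checks the flag the same way other deferred events are handled:

	/* Sketch only: the real dispatch lives in the driver's worker
	 * routine, not in lpfc_init.c.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
		lpfc_sli4_fcf_redisc_event_proc(phba);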
@@ -3212,6 +3218,68 @@ out_free_pmb:
 }
 
 /**
+ * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
+ * @vport: pointer to vport data structure.
+ *
+ * This routine performs Clear Virtual Link (CVL) on a vport in
+ * response to a CVL event.
+ *
+ * Return the pointer to the vport's Fabric ndlp if successful, otherwise
+ * return NULL.
+ **/
+static struct lpfc_nodelist *
+lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
+{
+       struct lpfc_nodelist *ndlp;
+       struct Scsi_Host *shost;
+       struct lpfc_hba *phba;
+
+       if (!vport)
+               return NULL;
+       ndlp = lpfc_findnode_did(vport, Fabric_DID);
+       if (!ndlp)
+               return NULL;
+       phba = vport->phba;
+       if (!phba)
+               return NULL;
+       if (phba->pport->port_state <= LPFC_FLOGI)
+               return NULL;
+       /* If virtual link is not yet instantiated ignore CVL */
+       if (vport->port_state <= LPFC_FDISC)
+               return NULL;
+       shost = lpfc_shost_from_vport(vport);
+       if (!shost)
+               return NULL;
+       lpfc_linkdown_port(vport);
+       lpfc_cleanup_pending_mbox(vport);
+       spin_lock_irq(shost->host_lock);
+       vport->fc_flag |= FC_VPORT_CVL_RCVD;
+       spin_unlock_irq(shost->host_lock);
+
+       return ndlp;
+}
+
+/**
+ * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine performs Clear Virtual Link (CVL) on all vports in
+ * response to an FCF dead event.
+ **/
+static void
+lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
+{
+       struct lpfc_vport **vports;
+       int i;
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports)
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+                       lpfc_sli4_perform_vport_cvl(vports[i]);
+       lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
  * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -3227,7 +3295,6 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host  *shost;
-       uint32_t link_state;
        int active_vlink_present;
        struct lpfc_vport **vports;
        int i;
@@ -3237,10 +3304,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
        switch (event_type) {
        case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
        case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                       "2546 New FCF found index 0x%x tag 0x%x\n",
-                       acqe_fcoe->index,
-                       acqe_fcoe->event_tag);
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+                       "2546 New FCF found/FCF parameter modified event: "
+                       "evt_tag:x%x, fcf_index:x%x\n",
+                       acqe_fcoe->event_tag, acqe_fcoe->index);
+
                spin_lock_irq(&phba->hbalock);
                if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
                    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3251,6 +3319,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
                        spin_unlock_irq(&phba->hbalock);
                        break;
                }
+
                if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
                        /*
                         * If fast FCF failover rescan event is pending,
@@ -3261,12 +3330,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
                }
                spin_unlock_irq(&phba->hbalock);
 
-               /* Read the FCF table and re-discover SAN. */
-               rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+               if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
+                   !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+                       /*
+                        * During period of FCF discovery, read the FCF
+                        * table record indexed by the event to update
+                        * FCF round robin failover eligible FCF bmask.
+                        */
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+                                       LOG_DISCOVERY,
+                                       "2779 Read new FCF record with "
+                                       "fcf_index:x%x for updating FCF "
+                                       "round robin failover bmask\n",
+                                       acqe_fcoe->index);
+                       rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
+               }
+
+               /* Otherwise, scan the entire FCF table and re-discover SAN */
+               lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+                               "2770 Start FCF table scan due to new FCF "
+                               "event: evt_tag:x%x, fcf_index:x%x\n",
+                               acqe_fcoe->event_tag, acqe_fcoe->index);
+               rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+                                                    LPFC_FCOE_FCF_GET_FIRST);
                if (rc)
-                       lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                                       "2547 Read FCF record failed 0x%x\n",
-                                       rc);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+                                       "2547 Issue FCF scan read FCF mailbox "
+                                       "command failed 0x%x\n", rc);
                break;
 
        case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
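
While FCF discovery is in progress, the new branch above reads only the FCF record named by the event so the round robin failover eligibility bmask stays current. That bmask, added by this patch, is a plain bitmap indexed by fcf_index; a minimal sketch of how such a bitmap can be maintained with standard kernel bitmap primitives (the driver's own helpers, such as lpfc_sli4_fcf_rr_index_clear() used in the FCF DEAD case below, are defined in the SLI layer and may differ in detail):

/* Sketch only, not the driver's actual helpers: mark or unmark an FCF table
 * index as eligible for round robin failover.
 */
static int fcf_rr_index_set_sketch(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
		return -EINVAL;
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
	return 0;
}

static void fcf_rr_index_clear_sketch(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX)
		clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
}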
@@ -3277,47 +3367,63 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
                break;
 
        case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
                        "2549 FCF disconnected from network index 0x%x"
                        " tag 0x%x\n", acqe_fcoe->index,
                        acqe_fcoe->event_tag);
                /* If the event is not for currently used fcf do nothing */
                if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
                        break;
-               /*
-                * Currently, driver support only one FCF - so treat this as
-                * a link down, but save the link state because we don't want
-                * it to be changed to Link Down unless it is already down.
+               /* Request the port to rediscover the entire FCF table for
+                * a fast recovery in case the current FCF record is no
+                * longer valid, unless an FCF failover process is already
+                * in progress.
                 */
-               link_state = phba->link_state;
-               lpfc_linkdown(phba);
-               phba->link_state = link_state;
-               /* Unregister FCF if no devices connected to it */
-               lpfc_unregister_unused_fcf(phba);
+               spin_lock_irq(&phba->hbalock);
+               if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+                       spin_unlock_irq(&phba->hbalock);
+                       /* Update FLOGI FCF failover eligible FCF bmask */
+                       lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
+                       break;
+               }
+               /* Mark the fast failover process in progress */
+               phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+               spin_unlock_irq(&phba->hbalock);
+               lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+                               "2771 Start FCF fast failover process due to "
+                               "FCF DEAD event: evt_tag:x%x, fcf_index:x%x\n",
+                               acqe_fcoe->event_tag, acqe_fcoe->index);
+               rc = lpfc_sli4_redisc_fcf_table(phba);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+                                       LOG_DISCOVERY,
+                                       "2772 Issue FCF rediscover mailbox "
+                                       "command failed, fail through to FCF "
+                                       "dead event\n");
+                       spin_lock_irq(&phba->hbalock);
+                       phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+                       spin_unlock_irq(&phba->hbalock);
+                       /*
+                        * Last resort will fail over by treating this
+                        * as a link down to FCF registration.
+                        */
+                       lpfc_sli4_fcf_dead_failthrough(phba);
+               } else
+                       /* Handling fast FCF failover to a DEAD FCF event
+                        * is considered equivalent to receiving CVL to all
+                        * vports.
+                        */
+                       lpfc_sli4_perform_all_vport_cvl(phba);
                break;
        case LPFC_FCOE_EVENT_TYPE_CVL:
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
                        "2718 Clear Virtual Link Received for VPI 0x%x"
                        " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
                vport = lpfc_find_vport_by_vpid(phba,
                                acqe_fcoe->index - phba->vpi_base);
-               if (!vport)
-                       break;
-               ndlp = lpfc_findnode_did(vport, Fabric_DID);
+               ndlp = lpfc_sli4_perform_vport_cvl(vport);
                if (!ndlp)
                        break;
-               shost = lpfc_shost_from_vport(vport);
-               if (phba->pport->port_state <= LPFC_FLOGI)
-                       break;
-               /* If virtual link is not yet instantiated ignore CVL */
-               if (vport->port_state <= LPFC_FDISC)
-                       break;
-
-               lpfc_linkdown_port(vport);
-               lpfc_cleanup_pending_mbox(vport);
-               spin_lock_irq(shost->host_lock);
-               vport->fc_flag |= FC_VPORT_CVL_RCVD;
-               spin_unlock_irq(shost->host_lock);
                active_vlink_present = 0;
 
                vports = lpfc_create_vport_work_array(phba);
@@ -3340,6 +3446,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
                         * re-instantiate the Vlink using FDISC.
                         */
                        mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       shost = lpfc_shost_from_vport(vport);
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_DELAY_TMO;
                        spin_unlock_irq(shost->host_lock);
@@ -3350,15 +3457,38 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
                         * Otherwise, we request port to rediscover
                         * the entire FCF table for a fast recovery
                         * from possible case that the current FCF
-                        * is no longer valid.
+                        * is no longer valid, unless we are already
+                        * in the FCF failover process.
                         */
+                       spin_lock_irq(&phba->hbalock);
+                       if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+                               spin_unlock_irq(&phba->hbalock);
+                               break;
+                       }
+                       /* Mark the fast failover process in progress */
+                       phba->fcf.fcf_flag |= FCF_ACVL_DISC;
+                       spin_unlock_irq(&phba->hbalock);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+                                       LOG_DISCOVERY,
+                                       "2773 Start FCF fast failover due "
+                                       "to CVL event: evt_tag:x%x\n",
+                                       acqe_fcoe->event_tag);
                        rc = lpfc_sli4_redisc_fcf_table(phba);
-                       if (rc)
+                       if (rc) {
+                               lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+                                               LOG_DISCOVERY,
+                                               "2774 Issue FCF rediscover "
+                                               "mailbox command failed, fail "
+                                               "through to CVL event\n");
+                               spin_lock_irq(&phba->hbalock);
+                               phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+                               spin_unlock_irq(&phba->hbalock);
                                /*
                                 * Last resort will be to re-try on
                                 * the current registered FCF entry.
                                 */
                                lpfc_retry_pport_discovery(phba);
+                       }
                }
                break;
        default:
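
Both failover paths above keep a last resort for when the FCF rediscover mailbox command cannot be issued: the CVL path retries port discovery against the currently registered FCF, while the FCF DEAD path calls lpfc_sli4_fcf_dead_failthrough(). That helper is defined outside this file; based on the inline code this change removes from the event handler, it is expected to do roughly the following (hedged sketch):

/* Hedged sketch of the FCF DEAD last-resort path, mirroring the removed
 * inline handling above.
 */
static void fcf_dead_failthrough_sketch(struct lpfc_hba *phba)
{
	uint32_t link_state = phba->link_state;

	/* Treat the dead FCF as a link down, but keep an already-down
	 * link state from being overwritten.
	 */
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Drop the FCF registration if no devices are still using it */
	lpfc_unregister_unused_fcf(phba);
}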
@@ -3455,11 +3585,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
        spin_unlock_irq(&phba->hbalock);
 
        /* Scan FCF table from the first entry to re-discover SAN */
-       rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+       lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+                       "2777 Start FCF table scan after FCF "
+                       "rediscovery quiescent period over\n");
+       rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
        if (rc)
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                               "2747 Post FCF rediscovery read FCF record "
-                               "failed 0x%x\n", rc);
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+                               "2747 Issue FCF scan read FCF mailbox "
+                               "command failed 0x%x\n", rc);
 }
 
 /**
@@ -3751,6 +3884,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
        struct lpfc_mqe *mqe;
+       int longs;
 
        /* Before proceed, wait for POST done and device ready */
        rc = lpfc_sli4_post_status_check(phba);
@@ -3927,13 +4061,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_free_active_sgl;
        }
 
+       /* Allocate eligible FCF bmask memory for FCF round robin failover */
+       longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
+       phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+                                        GFP_KERNEL);
+       if (!phba->fcf.fcf_rr_bmask) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2759 Failed allocate memory for FCF round "
+                               "robin failover bmask\n");
+               goto out_remove_rpi_hdrs;
+       }
+
        phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
                                    phba->cfg_fcp_eq_count), GFP_KERNEL);
        if (!phba->sli4_hba.fcp_eq_hdl) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2572 Failed allocate memory for fast-path "
                                "per-EQ handle array\n");
-               goto out_remove_rpi_hdrs;
+               goto out_free_fcf_rr_bmask;
        }
 
        phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
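
The bmask allocation above open-codes a round-up division to get the number of unsigned longs needed to hold LPFC_SLI4_FCF_TBL_INDX_MAX bits. For reference, the same sizing expressed with standard kernel helpers (a sketch, not a proposed change to the patch):

	/* BITS_TO_LONGS() performs the same round-up division, and kcalloc()
	 * returns zeroed memory just like kzalloc() does here.
	 */
	longs = BITS_TO_LONGS(LPFC_SLI4_FCF_TBL_INDX_MAX);
	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
					 GFP_KERNEL);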
@@ -3986,6 +4131,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 out_free_fcp_eq_hdl:
        kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_fcf_rr_bmask:
+       kfree(phba->fcf.fcf_rr_bmask);
 out_remove_rpi_hdrs:
        lpfc_sli4_remove_rpi_hdrs(phba);
 out_free_active_sgl:
@@ -4031,6 +4178,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
        lpfc_sli4_remove_rpi_hdrs(phba);
        lpfc_sli4_remove_rpis(phba);
 
+       /* Free eligible FCF index bmask */
+       kfree(phba->fcf.fcf_rr_bmask);
+
        /* Free the ELS sgl list */
        lpfc_free_active_sgl(phba);
        lpfc_free_sgl_list(phba);