lpfc: lpfc_hbadisc.c — vport support, deferred dev_loss_tmo handling, and fast-path event posting
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2c21641..311ed6d 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 #include <scsi/scsi_transport_fc.h>
 
 #include "lpfc_hw.h"
+#include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_sli.h"
 #include "lpfc_scsi.h"
 #include "lpfc.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
 
 /* AlpaArray for assignment of scsid for scan-down and bind_method */
 static uint8_t lpfcAlpaArray[] = {
@@ -54,7 +57,8 @@ static uint8_t lpfcAlpaArray[] = {
        0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
 };
 
-static void lpfc_disc_timeout_handler(struct lpfc_hba *);
+static void lpfc_disc_timeout_handler(struct lpfc_vport *);
+static void lpfc_disc_flush_list(struct lpfc_vport *vport);
 
 void
 lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -66,7 +70,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
        rdata = rport->dd_data;
        ndlp = rdata->pnode;
 
-       if (!ndlp) {
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
                        printk(KERN_ERR "Cannot find remote node"
                        " to terminate I/O Data x%x\n",
@@ -74,16 +78,17 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
                return;
        }
 
-       phba = ndlp->nlp_phba;
+       phba  = ndlp->vport->phba;
+
+       lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+               "rport terminate: sid:x%x did:x%x flg:x%x",
+               ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
 
-       spin_lock_irq(phba->host->host_lock);
        if (ndlp->nlp_sid != NLP_NO_SID) {
-               lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
-                       ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
+               lpfc_sli_abort_iocb(ndlp->vport,
+                       &phba->sli.ring[phba->sli.fcp_ring],
+                       ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
-       spin_unlock_irq(phba->host->host_lock);
-
-       return;
 }
 
 /*
@@ -94,99 +99,341 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 {
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist * ndlp;
+       struct lpfc_vport *vport;
+       struct lpfc_hba   *phba;
+       struct lpfc_work_evt *evtp;
+       int  put_node;
+       int  put_rport;
+
+       rdata = rport->dd_data;
+       ndlp = rdata->pnode;
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+               return;
+
+       vport = ndlp->vport;
+       phba  = vport->phba;
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+               "rport devlosscb: sid:x%x did:x%x flg:x%x",
+               ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+
+       /* Don't defer this if we are in the process of deleting the vport
+        * or unloading the driver. The unload will cleanup the node
+        * appropriately we just need to cleanup the ndlp rport info here.
+        */
+       if (vport->load_flag & FC_UNLOADING) {
+               put_node = rdata->pnode != NULL;
+               put_rport = ndlp->rport != NULL;
+               rdata->pnode = NULL;
+               ndlp->rport = NULL;
+               if (put_node)
+                       lpfc_nlp_put(ndlp);
+               if (put_rport)
+                       put_device(&rport->dev);
+               return;
+       }
+
+       if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+               return;
+
+       evtp = &ndlp->dev_loss_evt;
+
+       if (!list_empty(&evtp->evt_listp))
+               return;
+
+       spin_lock_irq(&phba->hbalock);
+       /* We need to hold the node by incrementing the reference
+        * count until this queued work is done
+        */
+       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+       if (evtp->evt_arg1) {
+               evtp->evt = LPFC_EVT_DEV_LOSS;
+               list_add_tail(&evtp->evt_listp, &phba->work_list);
+               lpfc_worker_wake_up(phba);
+       }
+       spin_unlock_irq(&phba->hbalock);
+
+       return;
+}
+
+/*
+ * This function is called from the worker thread when dev_loss_tmo
+ * expire.
+ */
+static void
+lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_rport_data *rdata;
+       struct fc_rport   *rport;
+       struct lpfc_vport *vport;
+       struct lpfc_hba   *phba;
        uint8_t *name;
+       int  put_node;
+       int  put_rport;
        int warn_on = 0;
-       struct lpfc_hba *phba;
+
+       rport = ndlp->rport;
+
+       if (!rport)
+               return;
 
        rdata = rport->dd_data;
-       ndlp = rdata->pnode;
+       name = (uint8_t *) &ndlp->nlp_portname;
+       vport = ndlp->vport;
+       phba  = vport->phba;
 
-       if (!ndlp) {
-               if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
-                       printk(KERN_ERR "Cannot find remote node"
-                       " for rport in dev_loss_tmo_callbk x%x\n",
-                       rport->port_id);
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+               "rport devlosstmo:did:x%x type:x%x id:x%x",
+               ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+
+       /* Don't defer this if we are in the process of deleting the vport
+        * or unloading the driver. The unload will cleanup the node
+        * appropriately we just need to cleanup the ndlp rport info here.
+        */
+       if (vport->load_flag & FC_UNLOADING) {
+               if (ndlp->nlp_sid != NLP_NO_SID) {
+                       /* flush the target */
+                       lpfc_sli_abort_iocb(vport,
+                                       &phba->sli.ring[phba->sli.fcp_ring],
+                                       ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+               }
+               put_node = rdata->pnode != NULL;
+               put_rport = ndlp->rport != NULL;
+               rdata->pnode = NULL;
+               ndlp->rport = NULL;
+               if (put_node)
+                       lpfc_nlp_put(ndlp);
+               if (put_rport)
+                       put_device(&rport->dev);
                return;
        }
 
-       name = (uint8_t *)&ndlp->nlp_portname;
-       phba = ndlp->nlp_phba;
+       if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+                                "0284 Devloss timeout Ignored on "
+                                "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
+                                "NPort x%x\n",
+                                *name, *(name+1), *(name+2), *(name+3),
+                                *(name+4), *(name+5), *(name+6), *(name+7),
+                                ndlp->nlp_DID);
+               return;
+       }
 
-       spin_lock_irq(phba->host->host_lock);
+       if (ndlp->nlp_type & NLP_FABRIC) {
+               /* We will clean up these Nodes in linkup */
+               put_node = rdata->pnode != NULL;
+               put_rport = ndlp->rport != NULL;
+               rdata->pnode = NULL;
+               ndlp->rport = NULL;
+               if (put_node)
+                       lpfc_nlp_put(ndlp);
+               if (put_rport)
+                       put_device(&rport->dev);
+               return;
+       }
 
        if (ndlp->nlp_sid != NLP_NO_SID) {
                warn_on = 1;
                /* flush the target */
-               lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
-                       ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
+               lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
+                                   ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
-       if (phba->fc_flag & FC_UNLOADING)
-               warn_on = 0;
-
-       spin_unlock_irq(phba->host->host_lock);
 
        if (warn_on) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                               "%d:0203 Devloss timeout on "
-                               "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
-                               "NPort x%x Data: x%x x%x x%x\n",
-                               phba->brd_no,
-                               *name, *(name+1), *(name+2), *(name+3),
-                               *(name+4), *(name+5), *(name+6), *(name+7),
-                               ndlp->nlp_DID, ndlp->nlp_flag,
-                               ndlp->nlp_state, ndlp->nlp_rpi);
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                                "0203 Devloss timeout on "
+                                "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+                                "NPort x%06x Data: x%x x%x x%x\n",
+                                *name, *(name+1), *(name+2), *(name+3),
+                                *(name+4), *(name+5), *(name+6), *(name+7),
+                                ndlp->nlp_DID, ndlp->nlp_flag,
+                                ndlp->nlp_state, ndlp->nlp_rpi);
        } else {
-               lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-                               "%d:0204 Devloss timeout on "
-                               "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
-                               "NPort x%x Data: x%x x%x x%x\n",
-                               phba->brd_no,
-                               *name, *(name+1), *(name+2), *(name+3),
-                               *(name+4), *(name+5), *(name+6), *(name+7),
-                               ndlp->nlp_DID, ndlp->nlp_flag,
-                               ndlp->nlp_state, ndlp->nlp_rpi);
-       }
-
-       if (!(phba->fc_flag & FC_UNLOADING) &&
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+                                "0204 Devloss timeout on "
+                                "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+                                "NPort x%06x Data: x%x x%x x%x\n",
+                                *name, *(name+1), *(name+2), *(name+3),
+                                *(name+4), *(name+5), *(name+6), *(name+7),
+                                ndlp->nlp_DID, ndlp->nlp_flag,
+                                ndlp->nlp_state, ndlp->nlp_rpi);
+       }
+
+       put_node = rdata->pnode != NULL;
+       put_rport = ndlp->rport != NULL;
+       rdata->pnode = NULL;
+       ndlp->rport = NULL;
+       if (put_node)
+               lpfc_nlp_put(ndlp);
+       if (put_rport)
+               put_device(&rport->dev);
+
+       if (!(vport->load_flag & FC_UNLOADING) &&
            !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
-           !(ndlp->nlp_flag & NLP_NPR_2B_DISC))
-               lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
-       else {
-               rdata->pnode = NULL;
-               ndlp->rport = NULL;
+           !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+           (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
+               lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+}
+
+/**
+ * lpfc_alloc_fast_evt: Allocates data structure for posting event.
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from the functions which need to post
+ * events from interrupt context. This function allocates data
+ * structure required for posting event. It also keeps track of
+ * number of events pending and prevent event storm when there are
+ * too many events.
+ **/
+struct lpfc_fast_path_event *
+lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
+       struct lpfc_fast_path_event *ret;
+
+       /* If there are lot of fast event do not exhaust memory due to this */
+       if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
+               return NULL;
+
+       ret = kzalloc(sizeof(struct lpfc_fast_path_event), GFP_ATOMIC);
+       if (!ret)
+               return NULL;    /* don't touch ret on allocation failure */
+       atomic_inc(&phba->fast_event_count);
+       INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+       ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+       return ret;
+}
+
+/**
+ * lpfc_free_fast_evt: Frees event data structure.
+ * @phba: Pointer to hba context object.
+ * @evt:  Event object which need to be freed.
+ *
+ * This function frees the data structure required for posting
+ * events.
+ **/
+void
+lpfc_free_fast_evt(struct lpfc_hba *phba,
+               struct lpfc_fast_path_event *evt) {
+
+       atomic_dec(&phba->fast_event_count);
+       kfree(evt);
+}
+
+/**
+ * lpfc_send_fastpath_evt: Posts events generated from fast path.
+ * @phba: Pointer to hba context object.
+ * @evtp: Event data structure.
+ *
+ * This function is called from worker thread, when the interrupt
+ * context need to post an event. This function posts the event
+ * to fc transport netlink interface.
+ **/
+static void
+lpfc_send_fastpath_evt(struct lpfc_hba *phba,
+               struct lpfc_work_evt *evtp)
+{
+       unsigned long evt_category, evt_sub_category;
+       struct lpfc_fast_path_event *fast_evt_data;
+       char *evt_data;
+       uint32_t evt_data_size;
+       struct Scsi_Host *shost;
+
+       fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
+               work_evt);
+
+       evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
+       evt_sub_category = (unsigned long) fast_evt_data->un.
+                       fabric_evt.subcategory;
+       shost = lpfc_shost_from_vport(fast_evt_data->vport);
+       if (evt_category == FC_REG_FABRIC_EVENT) {
+               if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
+                       evt_data = (char *) &fast_evt_data->un.read_check_error;
+                       evt_data_size = sizeof(fast_evt_data->un.
+                               read_check_error);
+               } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
+                       (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
+                       evt_data = (char *) &fast_evt_data->un.fabric_evt;
+                       evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
+               } else {
+                       lpfc_free_fast_evt(phba, fast_evt_data);
+                       return;
+               }
+       } else if (evt_category == FC_REG_SCSI_EVENT) {
+               switch (evt_sub_category) {
+               case LPFC_EVENT_QFULL:
+               case LPFC_EVENT_DEVBSY:
+                       evt_data = (char *) &fast_evt_data->un.scsi_evt;
+                       evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
+                       break;
+               case LPFC_EVENT_CHECK_COND:
+                       evt_data = (char *) &fast_evt_data->un.check_cond_evt;
+                       evt_data_size =  sizeof(fast_evt_data->un.
+                               check_cond_evt);
+                       break;
+               case LPFC_EVENT_VARQUEDEPTH:
+                       evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
+                       evt_data_size = sizeof(fast_evt_data->un.
+                               queue_depth_evt);
+                       break;
+               default:
+                       lpfc_free_fast_evt(phba, fast_evt_data);
+                       return;
+               }
+       } else {
+               lpfc_free_fast_evt(phba, fast_evt_data);
+               return;
        }
 
+       fc_host_post_vendor_event(shost,
+               fc_get_event_number(),
+               evt_data_size,
+               evt_data,
+               LPFC_NL_VENDOR_ID);
+
+       lpfc_free_fast_evt(phba, fast_evt_data);
        return;
 }
 
 static void
-lpfc_work_list_done(struct lpfc_hba * phba)
+lpfc_work_list_done(struct lpfc_hba *phba)
 {
        struct lpfc_work_evt  *evtp = NULL;
        struct lpfc_nodelist  *ndlp;
        int free_evt;
 
-       spin_lock_irq(phba->host->host_lock);
-       while(!list_empty(&phba->work_list)) {
+       spin_lock_irq(&phba->hbalock);
+       while (!list_empty(&phba->work_list)) {
                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                                 evt_listp);
-               spin_unlock_irq(phba->host->host_lock);
+               spin_unlock_irq(&phba->hbalock);
                free_evt = 1;
                switch (evtp->evt) {
                case LPFC_EVT_ELS_RETRY:
-                       ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+                       ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
                        lpfc_els_retry_delay_handler(ndlp);
+                       free_evt = 0; /* evt is part of ndlp */
+                       /* decrement the node reference count held
+                        * for this queued work
+                        */
+                       lpfc_nlp_put(ndlp);
+                       break;
+               case LPFC_EVT_DEV_LOSS:
+                       ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+                       lpfc_dev_loss_tmo_handler(ndlp);
                        free_evt = 0;
+                       /* decrement the node reference count held for
+                        * this queued work
+                        */
+                       lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_ONLINE:
-                       if (phba->hba_state < LPFC_LINK_DOWN)
-                               *(int *)(evtp->evt_arg1)  = lpfc_online(phba);
+                       if (phba->link_state < LPFC_LINK_DOWN)
+                               *(int *) (evtp->evt_arg1) = lpfc_online(phba);
                        else
-                               *(int *)(evtp->evt_arg1)  = 0;
+                               *(int *) (evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE_PREP:
-                       if (phba->hba_state >= LPFC_LINK_DOWN)
+                       if (phba->link_state >= LPFC_LINK_DOWN)
                                lpfc_offline_prep(phba);
                        *(int *)(evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
@@ -212,35 +459,40 @@ lpfc_work_list_done(struct lpfc_hba * phba)
                case LPFC_EVT_KILL:
                        lpfc_offline(phba);
                        *(int *)(evtp->evt_arg1)
-                               = (phba->stopped) ? 0 : lpfc_sli_brdkill(phba);
+                               = (phba->pport->stopped)
+                                       ? 0 : lpfc_sli_brdkill(phba);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
+               case LPFC_EVT_FASTPATH_MGMT_EVT:
+                       lpfc_send_fastpath_evt(phba, evtp);
+                       free_evt = 0;
+                       break;
                }
                if (free_evt)
                        kfree(evtp);
-               spin_lock_irq(phba->host->host_lock);
+               spin_lock_irq(&phba->hbalock);
        }
-       spin_unlock_irq(phba->host->host_lock);
+       spin_unlock_irq(&phba->hbalock);
 
 }
 
 static void
-lpfc_work_done(struct lpfc_hba * phba)
+lpfc_work_done(struct lpfc_hba *phba)
 {
        struct lpfc_sli_ring *pring;
+       uint32_t ha_copy, status, control, work_port_events;
+       struct lpfc_vport **vports;
+       struct lpfc_vport *vport;
        int i;
-       uint32_t ha_copy;
-       uint32_t control;
-       uint32_t work_hba_events;
 
-       spin_lock_irq(phba->host->host_lock);
+       spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;
        phba->work_ha = 0;
-       work_hba_events=phba->work_hba_events;
-       spin_unlock_irq(phba->host->host_lock);
+       spin_unlock_irq(&phba->hbalock);
 
        if (ha_copy & HA_ERATT)
+               /* Handle the error attention event */
                lpfc_handle_eratt(phba);
 
        if (ha_copy & HA_MBATT)
@@ -249,64 +501,79 @@ lpfc_work_done(struct lpfc_hba * phba)
        if (ha_copy & HA_LATT)
                lpfc_handle_latt(phba);
 
-       if (work_hba_events & WORKER_DISC_TMO)
-               lpfc_disc_timeout_handler(phba);
-
-       if (work_hba_events & WORKER_ELS_TMO)
-               lpfc_els_timeout_handler(phba);
-
-       if (work_hba_events & WORKER_MBOX_TMO)
-               lpfc_mbox_timeout_handler(phba);
-
-       if (work_hba_events & WORKER_FDMI_TMO)
-               lpfc_fdmi_tmo_handler(phba);
-
-       spin_lock_irq(phba->host->host_lock);
-       phba->work_hba_events &= ~work_hba_events;
-       spin_unlock_irq(phba->host->host_lock);
-
-       for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
-               pring = &phba->sli.ring[i];
-               if ((ha_copy & HA_RXATT)
-                   || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
-                       if (pring->flag & LPFC_STOP_IOCB_MASK) {
-                               pring->flag |= LPFC_DEFERRED_RING_EVENT;
-                       } else {
-                               lpfc_sli_handle_slow_ring_event(phba, pring,
-                                                               (ha_copy &
-                                                                HA_RXMASK));
-                               pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
-                       }
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL)
+               for(i = 0; i <= phba->max_vpi; i++) {
                        /*
-                        * Turn on Ring interrupts
+                        * We could have no vports in array if unloading, so if
+                        * this happens then just use the pport
                         */
-                       spin_lock_irq(phba->host->host_lock);
-                       control = readl(phba->HCregaddr);
-                       control |= (HC_R0INT_ENA << i);
+                       if (vports[i] == NULL && i == 0)
+                               vport = phba->pport;
+                       else
+                               vport = vports[i];
+                       if (vport == NULL)
+                               break;
+                       spin_lock_irq(&vport->work_port_lock);
+                       work_port_events = vport->work_port_events;
+                       vport->work_port_events &= ~work_port_events;
+                       spin_unlock_irq(&vport->work_port_lock);
+                       if (work_port_events & WORKER_DISC_TMO)
+                               lpfc_disc_timeout_handler(vport);
+                       if (work_port_events & WORKER_ELS_TMO)
+                               lpfc_els_timeout_handler(vport);
+                       if (work_port_events & WORKER_HB_TMO)
+                               lpfc_hb_timeout_handler(phba);
+                       if (work_port_events & WORKER_MBOX_TMO)
+                               lpfc_mbox_timeout_handler(phba);
+                       if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
+                               lpfc_unblock_fabric_iocbs(phba);
+                       if (work_port_events & WORKER_FDMI_TMO)
+                               lpfc_fdmi_timeout_handler(vport);
+                       if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
+                               lpfc_ramp_down_queue_handler(phba);
+                       if (work_port_events & WORKER_RAMP_UP_QUEUE)
+                               lpfc_ramp_up_queue_handler(phba);
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
+
+       pring = &phba->sli.ring[LPFC_ELS_RING];
+       status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
+       status >>= (4*LPFC_ELS_RING);
+       if ((status & HA_RXMASK)
+               || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
+               if (pring->flag & LPFC_STOP_IOCB_EVENT) {
+                       pring->flag |= LPFC_DEFERRED_RING_EVENT;
+                       /* Set the lpfc data pending flag */
+                       set_bit(LPFC_DATA_READY, &phba->data_flags);
+               } else {
+                       pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+                       lpfc_sli_handle_slow_ring_event(phba, pring,
+                                                       (status &
+                                                        HA_RXMASK));
+               }
+               /*
+                * Turn on Ring interrupts
+                */
+               spin_lock_irq(&phba->hbalock);
+               control = readl(phba->HCregaddr);
+               if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+                       lpfc_debugfs_slow_ring_trc(phba,
+                               "WRK Enable ring: cntl:x%x hacopy:x%x",
+                               control, ha_copy, 0);
+
+                       control |= (HC_R0INT_ENA << LPFC_ELS_RING);
                        writel(control, phba->HCregaddr);
                        readl(phba->HCregaddr); /* flush */
-                       spin_unlock_irq(phba->host->host_lock);
                }
+               else {
+                       lpfc_debugfs_slow_ring_trc(phba,
+                               "WRK Ring ok:     cntl:x%x hacopy:x%x",
+                               control, ha_copy, 0);
+               }
+               spin_unlock_irq(&phba->hbalock);
        }
-
-       lpfc_work_list_done (phba);
-
-}
-
-static int
-check_work_wait_done(struct lpfc_hba *phba) {
-
-       spin_lock_irq(phba->host->host_lock);
-       if (phba->work_ha ||
-           phba->work_hba_events ||
-           (!list_empty(&phba->work_list)) ||
-           kthread_should_stop()) {
-               spin_unlock_irq(phba->host->host_lock);
-               return 1;
-       } else {
-               spin_unlock_irq(phba->host->host_lock);
-               return 0;
-       }
+       lpfc_work_list_done(phba);
 }
 
 int
@@ -314,24 +581,29 @@ lpfc_do_work(void *p)
 {
        struct lpfc_hba *phba = p;
        int rc;
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
 
        set_user_nice(current, -20);
-       phba->work_wait = &work_waitq;
-
-       while (1) {
-
-               rc = wait_event_interruptible(work_waitq,
-                                               check_work_wait_done(phba));
-               BUG_ON(rc);
-
-               if (kthread_should_stop())
+       phba->data_flags = 0;
+
+       while (!kthread_should_stop()) {
+               /* wait and check worker queue activities */
+               rc = wait_event_interruptible(phba->work_waitq,
+                                       (test_and_clear_bit(LPFC_DATA_READY,
+                                                           &phba->data_flags)
+                                        || kthread_should_stop()));
+               /* Signal wakeup shall terminate the worker thread */
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+                                       "0433 Wakeup on signal: rc=x%x\n", rc);
                        break;
+               }
 
+               /* Attend pending lpfc data processing */
                lpfc_work_done(phba);
-
        }
-       phba->work_wait = NULL;
+       phba->worker_thread = NULL;
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "0432 Worker thread stopped.\n");
        return 0;
 }
 
@@ -341,16 +613,17 @@ lpfc_do_work(void *p)
  * embedding it in the IOCB.
  */
 int
-lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
+lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
                      uint32_t evt)
 {
        struct lpfc_work_evt  *evtp;
+       unsigned long flags;
 
        /*
         * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
         * be queued to worker thread for processing
         */
-       evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
+       evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
        if (!evtp)
                return 0;
 
@@ -358,162 +631,213 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
        evtp->evt_arg2  = arg2;
        evtp->evt       = evt;
 
-       spin_lock_irq(phba->host->host_lock);
+       spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
-       if (phba->work_wait)
-               wake_up(phba->work_wait);
-       spin_unlock_irq(phba->host->host_lock);
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+
+       lpfc_worker_wake_up(phba);
 
        return 1;
 }
 
-int
-lpfc_linkdown(struct lpfc_hba * phba)
+void
+lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 {
-       struct lpfc_sli       *psli;
-       struct lpfc_nodelist  *ndlp, *next_ndlp;
-       struct list_head *listp, *node_list[7];
-       LPFC_MBOXQ_t     *mb;
-       int               rc, i;
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_hba  *phba = vport->phba;
+       struct lpfc_nodelist *ndlp, *next_ndlp;
+       int  rc;
 
-       psli = &phba->sli;
-       /* sysfs or selective reset may call this routine to clean up */
-       if (phba->hba_state >= LPFC_LINK_DOWN) {
-               if (phba->hba_state == LPFC_LINK_DOWN)
-                       return 0;
+       list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+               if (!NLP_CHK_NODE_ACT(ndlp))
+                       continue;
+               if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+                       continue;
+               if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
+                       ((vport->port_type == LPFC_NPIV_PORT) &&
+                       (ndlp->nlp_DID == NameServer_DID)))
+                       lpfc_unreg_rpi(vport, ndlp);
 
-               spin_lock_irq(phba->host->host_lock);
-               phba->hba_state = LPFC_LINK_DOWN;
-               spin_unlock_irq(phba->host->host_lock);
+               /* Leave Fabric nodes alone on link down */
+               if (!remove && ndlp->nlp_type & NLP_FABRIC)
+                       continue;
+               rc = lpfc_disc_state_machine(vport, ndlp, NULL,
+                                            remove
+                                            ? NLP_EVT_DEVICE_RM
+                                            : NLP_EVT_DEVICE_RECOVERY);
        }
-
-       fc_host_post_event(phba->host, fc_get_event_number(),
-                       FCH_EVT_LINKDOWN, 0);
-
-       /* Clean up any firmware default rpi's */
-       if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
-               lpfc_unreg_did(phba, 0xffffffff, mb);
-               mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
-               if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
-                   == MBX_NOT_FINISHED) {
-                       mempool_free( mb, phba->mbox_mem_pool);
-               }
+       if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+               lpfc_mbx_unreg_vpi(vport);
+               spin_lock_irq(shost->host_lock);
+               vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+               spin_unlock_irq(shost->host_lock);
        }
+}
 
+void
+lpfc_port_link_failure(struct lpfc_vport *vport)
+{
        /* Cleanup any outstanding RSCN activity */
-       lpfc_els_flush_rscn(phba);
+       lpfc_els_flush_rscn(vport);
 
        /* Cleanup any outstanding ELS commands */
-       lpfc_els_flush_cmd(phba);
-
-       /* Issue a LINK DOWN event to all nodes */
-       node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
-       node_list[1] = &phba->fc_nlpmap_list;
-       node_list[2] = &phba->fc_nlpunmap_list;
-       node_list[3] = &phba->fc_prli_list;
-       node_list[4] = &phba->fc_reglogin_list;
-       node_list[5] = &phba->fc_adisc_list;
-       node_list[6] = &phba->fc_plogi_list;
-       for (i = 0; i < 7; i++) {
-               listp = node_list[i];
-               if (list_empty(listp))
-                       continue;
+       lpfc_els_flush_cmd(vport);
 
-               list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
+       lpfc_cleanup_rpis(vport, 0);
 
-                       rc = lpfc_disc_state_machine(phba, ndlp, NULL,
-                                            NLP_EVT_DEVICE_RECOVERY);
+       /* Turn off discovery timer if its running */
+       lpfc_can_disctmo(vport);
+}
 
-               }
-       }
+static void
+lpfc_linkdown_port(struct lpfc_vport *vport)
+{
+       struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+       fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+               "Link Down:       state:x%x rtry:x%x flg:x%x",
+               vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+       lpfc_port_link_failure(vport);
+
+}
+
+int
+lpfc_linkdown(struct lpfc_hba *phba)
+{
+       struct lpfc_vport *vport = phba->pport;
+       struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_vport **vports;
+       LPFC_MBOXQ_t          *mb;
+       int i;
 
-       /* free any ndlp's on unused list */
-       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
-                               nlp_listp) {
-               lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+       if (phba->link_state == LPFC_LINK_DOWN)
+               return 0;
+       spin_lock_irq(&phba->hbalock);
+       if (phba->link_state > LPFC_LINK_DOWN) {
+               phba->link_state = LPFC_LINK_DOWN;
+               phba->pport->fc_flag &= ~FC_LBIT;
+       }
+       spin_unlock_irq(&phba->hbalock);
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL)
+               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+                       /* Issue a LINK DOWN event to all nodes */
+                       lpfc_linkdown_port(vports[i]);
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
+       /* Clean up any firmware default RPIs */
+       mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (mb) {
+               lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
+               mb->vport = vport;
+               mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+               if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
+                   == MBX_NOT_FINISHED) {
+                       mempool_free(mb, phba->mbox_mem_pool);
+               }
        }
 
        /* Setup myDID for link up if we are in pt2pt mode */
-       if (phba->fc_flag & FC_PT2PT) {
-               phba->fc_myDID = 0;
-               if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+       if (phba->pport->fc_flag & FC_PT2PT) {
+               phba->pport->fc_myDID = 0;
+               mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (mb) {
                        lpfc_config_link(phba, mb);
-                       mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
-                       if (lpfc_sli_issue_mbox
-                           (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+                       mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+                       mb->vport = vport;
+                       if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                            == MBX_NOT_FINISHED) {
-                               mempool_free( mb, phba->mbox_mem_pool);
+                               mempool_free(mb, phba->mbox_mem_pool);
                        }
                }
-               spin_lock_irq(phba->host->host_lock);
-               phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
-               spin_unlock_irq(phba->host->host_lock);
+               spin_lock_irq(shost->host_lock);
+               phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+               spin_unlock_irq(shost->host_lock);
        }
-       spin_lock_irq(phba->host->host_lock);
-       phba->fc_flag &= ~FC_LBIT;
-       spin_unlock_irq(phba->host->host_lock);
 
-       /* Turn off discovery timer if its running */
-       lpfc_can_disctmo(phba);
-
-       /* Must process IOCBs on all rings to handle ABORTed I/Os */
        return 0;
 }
 
-static int
-lpfc_linkup(struct lpfc_hba * phba)
+static void
+lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
 {
-       struct lpfc_nodelist *ndlp, *next_ndlp;
-       struct list_head *listp, *node_list[7];
-       int i;
+       struct lpfc_nodelist *ndlp;
 
-       fc_host_post_event(phba->host, fc_get_event_number(),
-                       FCH_EVT_LINKUP, 0);
-
-       spin_lock_irq(phba->host->host_lock);
-       phba->hba_state = LPFC_LINK_UP;
-       phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
-                          FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
-       phba->fc_flag |= FC_NDISC_ACTIVE;
-       phba->fc_ns_retry = 0;
-       spin_unlock_irq(phba->host->host_lock);
-
-
-       node_list[0] = &phba->fc_plogi_list;
-       node_list[1] = &phba->fc_adisc_list;
-       node_list[2] = &phba->fc_reglogin_list;
-       node_list[3] = &phba->fc_prli_list;
-       node_list[4] = &phba->fc_nlpunmap_list;
-       node_list[5] = &phba->fc_nlpmap_list;
-       node_list[6] = &phba->fc_npr_list;
-       for (i = 0; i < 7; i++) {
-               listp = node_list[i];
-               if (list_empty(listp))
+       list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+               if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
-
-               list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-                       if (phba->fc_flag & FC_LBIT) {
-                               if (ndlp->nlp_type & NLP_FABRIC) {
-                                       /* On Linkup its safe to clean up the
-                                        * ndlp from Fabric connections.
-                                        */
-                                       lpfc_nlp_list(phba, ndlp,
-                                                       NLP_UNUSED_LIST);
-                               } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
-                                       /* Fail outstanding IO now since device
-                                        * is marked for PLOGI.
-                                        */
-                                       lpfc_unreg_rpi(phba, ndlp);
-                               }
-                       }
+               if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+                       continue;
+               if (ndlp->nlp_type & NLP_FABRIC) {
+                       /* On Linkup its safe to clean up the ndlp
+                        * from Fabric connections.
+                        */
+                       if (ndlp->nlp_DID != Fabric_DID)
+                               lpfc_unreg_rpi(vport, ndlp);
+                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+               } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+                       /* Fail outstanding IO now since device is
+                        * marked for PLOGI.
+                        */
+                       lpfc_unreg_rpi(vport, ndlp);
                }
        }
+}
 
-       /* free any ndlp's on unused list */
-       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
-                               nlp_listp) {
-               lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-       }
+static void
+lpfc_linkup_port(struct lpfc_vport *vport)
+{
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_hba  *phba = vport->phba;
+
+       if ((vport->load_flag & FC_UNLOADING) != 0)
+               return;
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+               "Link Up:         top:x%x speed:x%x flg:x%x",
+               phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
+
+       /* If NPIV is not enabled, only bring the physical port up */
+       if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+               (vport != phba->pport))
+               return;
+
+       fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+
+       spin_lock_irq(shost->host_lock);
+       vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
+                           FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+       vport->fc_flag |= FC_NDISC_ACTIVE;
+       vport->fc_ns_retry = 0;
+       spin_unlock_irq(shost->host_lock);
+
+       if (vport->fc_flag & FC_LBIT)
+               lpfc_linkup_cleanup_nodes(vport);
+
+}
+
+static int
+lpfc_linkup(struct lpfc_hba *phba)
+{
+       struct lpfc_vport **vports;
+       int i;
+
+       phba->link_state = LPFC_LINK_UP;
+
+       /* Unblock fabric iocbs if they are blocked */
+       clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+       del_timer_sync(&phba->fabric_block_timer);
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL)
+               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+                       lpfc_linkup_port(vports[i]);
+       lpfc_destroy_vport_work_array(phba, vports);
+       if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+               lpfc_issue_clear_la(phba, phba->pport);
 
        return 0;
 }
@@ -524,15 +848,15 @@ lpfc_linkup(struct lpfc_hba * phba)
  * as the completion routine when the command is
  * handed off to the SLI layer.
  */
-void
-lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+static void
+lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       struct lpfc_sli *psli;
-       MAILBOX_t *mb;
+       struct lpfc_vport *vport = pmb->vport;
+       struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_sli   *psli = &phba->sli;
+       MAILBOX_t *mb = &pmb->mb;
        uint32_t control;
 
-       psli = &phba->sli;
-       mb = &pmb->mb;
        /* Since we don't do discovery right now, turn these off here */
        psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
@@ -541,70 +865,57 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
        /* Check for error */
        if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
                /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
-               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-                               "%d:0320 CLEAR_LA mbxStatus error x%x hba "
-                               "state x%x\n",
-                               phba->brd_no, mb->mbxStatus, phba->hba_state);
-
-               phba->hba_state = LPFC_HBA_ERROR;
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+                                "0320 CLEAR_LA mbxStatus error x%x hba "
+                                "state x%x\n",
+                                mb->mbxStatus, vport->port_state);
+               phba->link_state = LPFC_HBA_ERROR;
                goto out;
        }
 
-       if (phba->fc_flag & FC_ABORT_DISCOVERY)
-               goto out;
-
-       phba->num_disc_nodes = 0;
-       /* go thru NPR list and issue ELS PLOGIs */
-       if (phba->fc_npr_cnt) {
-               lpfc_els_disc_plogi(phba);
-       }
+       if (vport->port_type == LPFC_PHYSICAL_PORT)
+               phba->link_state = LPFC_HBA_READY;
 
-       if (!phba->num_disc_nodes) {
-               spin_lock_irq(phba->host->host_lock);
-               phba->fc_flag &= ~FC_NDISC_ACTIVE;
-               spin_unlock_irq(phba->host->host_lock);
-       }
-
-       phba->hba_state = LPFC_HBA_READY;
+       spin_lock_irq(&phba->hbalock);
+       psli->sli_flag |= LPFC_PROCESS_LA;
+       control = readl(phba->HCregaddr);
+       control |= HC_LAINT_ENA;
+       writel(control, phba->HCregaddr);
+       readl(phba->HCregaddr); /* flush */
+       spin_unlock_irq(&phba->hbalock);
+       mempool_free(pmb, phba->mbox_mem_pool);
+       return;
 
 out:
        /* Device Discovery completes */
-       lpfc_printf_log(phba,
-                        KERN_INFO,
-                        LOG_DISCOVERY,
-                        "%d:0225 Device Discovery completes\n",
-                        phba->brd_no);
-
-       mempool_free( pmb, phba->mbox_mem_pool);
-
-       spin_lock_irq(phba->host->host_lock);
-       phba->fc_flag &= ~FC_ABORT_DISCOVERY;
-       if (phba->fc_flag & FC_ESTABLISH_LINK) {
-               phba->fc_flag &= ~FC_ESTABLISH_LINK;
-       }
-       spin_unlock_irq(phba->host->host_lock);
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+                        "0225 Device Discovery completes\n");
+       mempool_free(pmb, phba->mbox_mem_pool);
 
-       del_timer_sync(&phba->fc_estabtmo);
+       spin_lock_irq(shost->host_lock);
+       vport->fc_flag &= ~FC_ABORT_DISCOVERY;
+       spin_unlock_irq(shost->host_lock);
 
-       lpfc_can_disctmo(phba);
+       lpfc_can_disctmo(vport);
 
        /* turn on Link Attention interrupts */
-       spin_lock_irq(phba->host->host_lock);
+
+       spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
-       spin_unlock_irq(phba->host->host_lock);
+       spin_unlock_irq(&phba->hbalock);
 
        return;
 }
 
+
 static void
 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       struct lpfc_sli *psli = &phba->sli;
-       int rc;
+       struct lpfc_vport *vport = pmb->vport;
 
        if (pmb->mb.mbxStatus)
                goto out;
@@ -612,157 +923,143 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        mempool_free(pmb, phba->mbox_mem_pool);
 
        if (phba->fc_topology == TOPOLOGY_LOOP &&
-               phba->fc_flag & FC_PUBLIC_LOOP &&
-                !(phba->fc_flag & FC_LBIT)) {
+           vport->fc_flag & FC_PUBLIC_LOOP &&
+           !(vport->fc_flag & FC_LBIT)) {
                        /* Need to wait for FAN - use discovery timer
-                        * for timeout.  hba_state is identically
+                        * for timeout.  port_state is identically
                         * LPFC_LOCAL_CFG_LINK while waiting for FAN
                         */
-                       lpfc_set_disctmo(phba);
+                       lpfc_set_disctmo(vport);
                        return;
-               }
+       }
 
-       /* Start discovery by sending a FLOGI. hba_state is identically
+       /* Start discovery by sending a FLOGI. port_state is identically
         * LPFC_FLOGI while waiting for FLOGI cmpl
         */
-       phba->hba_state = LPFC_FLOGI;
-       lpfc_set_disctmo(phba);
-       lpfc_initial_flogi(phba);
+       if (vport->port_state != LPFC_FLOGI) {
+               lpfc_initial_flogi(vport);
+       }
        return;
 
 out:
-       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-                       "%d:0306 CONFIG_LINK mbxStatus error x%x "
-                       "HBA state x%x\n",
-                       phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+                        "0306 CONFIG_LINK mbxStatus error x%x "
+                        "HBA state x%x\n",
+                        pmb->mb.mbxStatus, vport->port_state);
+       mempool_free(pmb, phba->mbox_mem_pool);
 
        lpfc_linkdown(phba);
 
-       phba->hba_state = LPFC_HBA_ERROR;
-
-       lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                       "%d:0200 CONFIG_LINK bad hba state x%x\n",
-                       phba->brd_no, phba->hba_state);
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                        "0200 CONFIG_LINK bad hba state x%x\n",
+                        vport->port_state);
 
-       lpfc_clear_la(phba, pmb);
-       pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
-       rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
-       if (rc == MBX_NOT_FINISHED) {
-               mempool_free(pmb, phba->mbox_mem_pool);
-               lpfc_disc_flush_list(phba);
-               psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
-               psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
-               psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
-               phba->hba_state = LPFC_HBA_READY;
-       }
+       lpfc_issue_clear_la(phba, vport);
        return;
 }
 
 static void
-lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       struct lpfc_sli *psli = &phba->sli;
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+       struct lpfc_vport  *vport = pmb->vport;
 
 
        /* Check for error */
        if (mb->mbxStatus) {
                /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
-               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-                               "%d:0319 READ_SPARAM mbxStatus error x%x "
-                               "hba state x%x>\n",
-                               phba->brd_no, mb->mbxStatus, phba->hba_state);
-
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+                                "0319 READ_SPARAM mbxStatus error x%x "
+                                "hba state x%x>\n",
+                                mb->mbxStatus, vport->port_state);
                lpfc_linkdown(phba);
-               phba->hba_state = LPFC_HBA_ERROR;
                goto out;
        }
 
-       memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
+       memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
               sizeof (struct serv_parm));
        if (phba->cfg_soft_wwnn)
-               u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
+               u64_to_wwn(phba->cfg_soft_wwnn,
+                          vport->fc_sparam.nodeName.u.wwn);
        if (phba->cfg_soft_wwpn)
-               u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
-       memcpy((uint8_t *) & phba->fc_nodename,
-              (uint8_t *) & phba->fc_sparam.nodeName,
-              sizeof (struct lpfc_name));
-       memcpy((uint8_t *) & phba->fc_portname,
-              (uint8_t *) & phba->fc_sparam.portName,
-              sizeof (struct lpfc_name));
+               u64_to_wwn(phba->cfg_soft_wwpn,
+                          vport->fc_sparam.portName.u.wwn);
+       memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+              sizeof(vport->fc_nodename));
+       memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+              sizeof(vport->fc_portname));
+       if (vport->port_type == LPFC_PHYSICAL_PORT) {
+               memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
+               memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
+       }
+
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
-       mempool_free( pmb, phba->mbox_mem_pool);
+       mempool_free(pmb, phba->mbox_mem_pool);
        return;
 
 out:
        pmb->context1 = NULL;
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
-       if (phba->hba_state != LPFC_CLEAR_LA) {
-               lpfc_clear_la(phba, pmb);
-               pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
-               if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
-                   == MBX_NOT_FINISHED) {
-                       mempool_free( pmb, phba->mbox_mem_pool);
-                       lpfc_disc_flush_list(phba);
-                       psli->ring[(psli->extra_ring)].flag &=
-                           ~LPFC_STOP_IOCB_EVENT;
-                       psli->ring[(psli->fcp_ring)].flag &=
-                           ~LPFC_STOP_IOCB_EVENT;
-                       psli->ring[(psli->next_ring)].flag &=
-                           ~LPFC_STOP_IOCB_EVENT;
-                       phba->hba_state = LPFC_HBA_READY;
-               }
-       } else {
-               mempool_free( pmb, phba->mbox_mem_pool);
-       }
+       lpfc_issue_clear_la(phba, vport);
+       mempool_free(pmb, phba->mbox_mem_pool);
        return;
 }
 
 static void
 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 {
-       int i;
+       struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
+       int i;
        struct lpfc_dmabuf *mp;
        int rc;
 
        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 
-       spin_lock_irq(phba->host->host_lock);
+       spin_lock_irq(&phba->hbalock);
        switch (la->UlnkSpeed) {
-               case LA_1GHZ_LINK:
-                       phba->fc_linkspeed = LA_1GHZ_LINK;
-                       break;
-               case LA_2GHZ_LINK:
-                       phba->fc_linkspeed = LA_2GHZ_LINK;
-                       break;
-               case LA_4GHZ_LINK:
-                       phba->fc_linkspeed = LA_4GHZ_LINK;
-                       break;
-               default:
-                       phba->fc_linkspeed = LA_UNKNW_LINK;
-                       break;
+       case LA_1GHZ_LINK:
+               phba->fc_linkspeed = LA_1GHZ_LINK;
+               break;
+       case LA_2GHZ_LINK:
+               phba->fc_linkspeed = LA_2GHZ_LINK;
+               break;
+       case LA_4GHZ_LINK:
+               phba->fc_linkspeed = LA_4GHZ_LINK;
+               break;
+       case LA_8GHZ_LINK:
+               phba->fc_linkspeed = LA_8GHZ_LINK;
+               break;
+       default:
+               phba->fc_linkspeed = LA_UNKNW_LINK;
+               break;
        }
 
        phba->fc_topology = la->topology;
+       phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
        if (phba->fc_topology == TOPOLOGY_LOOP) {
-       /* Get Loop Map information */
+               phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
+               if (phba->cfg_enable_npiv)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1309 Link Up Event npiv not supported in loop "
+                               "topology\n");
+                               /* Get Loop Map information */
                if (la->il)
-                       phba->fc_flag |= FC_LBIT;
+                       vport->fc_flag |= FC_LBIT;
 
-               phba->fc_myDID = la->granted_AL_PA;
+               vport->fc_myDID = la->granted_AL_PA;
                i = la->un.lilpBde64.tus.f.bdeSize;
 
                if (i == 0) {
                        phba->alpa_map[0] = 0;
                } else {
-                       if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
+                       if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
                                int numalpa, j, k;
                                union {
                                        uint8_t pamap[16];
@@ -786,29 +1083,33 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
                                        }
                                        /* Link Up Event ALPA map */
                                        lpfc_printf_log(phba,
-                                               KERN_WARNING,
-                                               LOG_LINK_EVENT,
-                                               "%d:1304 Link Up Event "
-                                               "ALPA map Data: x%x "
-                                               "x%x x%x x%x\n",
-                                               phba->brd_no,
-                                               un.pa.wd1, un.pa.wd2,
-                                               un.pa.wd3, un.pa.wd4);
+                                                       KERN_WARNING,
+                                                       LOG_LINK_EVENT,
+                                                       "1304 Link Up Event "
+                                                       "ALPA map Data: x%x "
+                                                       "x%x x%x x%x\n",
+                                                       un.pa.wd1, un.pa.wd2,
+                                                       un.pa.wd3, un.pa.wd4);
                                }
                        }
                }
        } else {
-               phba->fc_myDID = phba->fc_pref_DID;
-               phba->fc_flag |= FC_LBIT;
+               if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
+                       if (phba->max_vpi && phba->cfg_enable_npiv &&
+                          (phba->sli_rev == 3))
+                               phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+               }
+               vport->fc_myDID = phba->fc_pref_DID;
+               vport->fc_flag |= FC_LBIT;
        }
-       spin_unlock_irq(phba->host->host_lock);
+       spin_unlock_irq(&phba->hbalock);
 
        lpfc_linkup(phba);
        if (sparam_mbox) {
-               lpfc_read_sparam(phba, sparam_mbox);
+               lpfc_read_sparam(phba, sparam_mbox, 0);
+               sparam_mbox->vport = vport;
                sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
-               rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
-                                               (MBX_NOWAIT | MBX_STOP_IOCB));
+               rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
                if (rc == MBX_NOT_FINISHED) {
                        mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -816,38 +1117,52 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
                        mempool_free(sparam_mbox, phba->mbox_mem_pool);
                        if (cfglink_mbox)
                                mempool_free(cfglink_mbox, phba->mbox_mem_pool);
-                       return;
+                       goto out;
                }
        }
 
        if (cfglink_mbox) {
-               phba->hba_state = LPFC_LOCAL_CFG_LINK;
+               vport->port_state = LPFC_LOCAL_CFG_LINK;
                lpfc_config_link(phba, cfglink_mbox);
+               cfglink_mbox->vport = vport;
                cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
-               rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
-                                               (MBX_NOWAIT | MBX_STOP_IOCB));
-               if (rc == MBX_NOT_FINISHED)
-                       mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+               rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
+               if (rc != MBX_NOT_FINISHED)
+                       return;
+               mempool_free(cfglink_mbox, phba->mbox_mem_pool);
        }
+out:
+       lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+                        "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+                        vport->port_state, sparam_mbox, cfglink_mbox);
+       lpfc_issue_clear_la(phba, vport);
+       return;
 }
 
 static void
-lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
+lpfc_enable_la(struct lpfc_hba *phba)
+{
        uint32_t control;
        struct lpfc_sli *psli = &phba->sli;
-
-       lpfc_linkdown(phba);
-
-       /* turn on Link Attention interrupts - no CLEAR_LA needed */
-       spin_lock_irq(phba->host->host_lock);
+       spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
-       spin_unlock_irq(phba->host->host_lock);
+       spin_unlock_irq(&phba->hbalock);
+}
+
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
+       lpfc_linkdown(phba);
+       lpfc_enable_la(phba);
+       /* turn on Link Attention interrupts - no CLEAR_LA needed */
 }
 
+
 /*
  * This routine handles processing a READ_LA mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -855,22 +1170,23 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
  * handed off to the SLI layer.
  */
 void
-lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
+       struct lpfc_vport *vport = pmb->vport;
+       struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        READ_LA_VAR *la;
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
+       /* Unblock ELS traffic */
+       phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        /* Check for error */
        if (mb->mbxStatus) {
-               lpfc_printf_log(phba,
-                               KERN_INFO,
-                               LOG_LINK_EVENT,
-                               "%d:1307 READ_LA mbox error x%x state x%x\n",
-                               phba->brd_no,
-                               mb->mbxStatus, phba->hba_state);
+               lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+                               "1307 READ_LA mbox error x%x state x%x\n",
+                               mb->mbxStatus, vport->port_state);
                lpfc_mbx_issue_link_down(phba);
-               phba->hba_state = LPFC_HBA_ERROR;
+               phba->link_state = LPFC_HBA_ERROR;
                goto lpfc_mbx_cmpl_read_la_free_mbuf;
        }
 
@@ -878,42 +1194,101 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 
        memcpy(&phba->alpa_map[0], mp->virt, 128);
 
-       spin_lock_irq(phba->host->host_lock);
+       spin_lock_irq(shost->host_lock);
        if (la->pb)
-               phba->fc_flag |= FC_BYPASSED_MODE;
+               vport->fc_flag |= FC_BYPASSED_MODE;
        else
-               phba->fc_flag &= ~FC_BYPASSED_MODE;
-       spin_unlock_irq(phba->host->host_lock);
+               vport->fc_flag &= ~FC_BYPASSED_MODE;
+       spin_unlock_irq(shost->host_lock);
 
        if (((phba->fc_eventTag + 1) < la->eventTag) ||
-            (phba->fc_eventTag == la->eventTag)) {
+           (phba->fc_eventTag == la->eventTag)) {
                phba->fc_stat.LinkMultiEvent++;
-               if (la->attType == AT_LINK_UP) {
+               if (la->attType == AT_LINK_UP)
                        if (phba->fc_eventTag != 0)
                                lpfc_linkdown(phba);
-               }
        }
 
        phba->fc_eventTag = la->eventTag;
+       if (la->mm)
+               phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+       else
+               phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
 
-       if (la->attType == AT_LINK_UP) {
+       if (la->attType == AT_LINK_UP && (!la->mm)) {
                phba->fc_stat.LinkUp++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-                               "%d:1303 Link Up Event x%x received "
-                               "Data: x%x x%x x%x x%x\n",
-                               phba->brd_no, la->eventTag, phba->fc_eventTag,
-                               la->granted_AL_PA, la->UlnkSpeed,
-                               phba->alpa_map[0]);
+               if (phba->link_flag & LS_LOOPBACK_MODE) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                                       "1306 Link Up Event in loop back mode "
+                                       "x%x received Data: x%x x%x x%x x%x\n",
+                                       la->eventTag, phba->fc_eventTag,
+                                       la->granted_AL_PA, la->UlnkSpeed,
+                                       phba->alpa_map[0]);
+               } else {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                                       "1303 Link Up Event x%x received "
+                                       "Data: x%x x%x x%x x%x x%x x%x %d\n",
+                                       la->eventTag, phba->fc_eventTag,
+                                       la->granted_AL_PA, la->UlnkSpeed,
+                                       phba->alpa_map[0],
+                                       la->mm, la->fa,
+                                       phba->wait_4_mlo_maint_flg);
+               }
                lpfc_mbx_process_link_up(phba, la);
-       } else {
+       } else if (la->attType == AT_LINK_DOWN) {
                phba->fc_stat.LinkDown++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-                               "%d:1305 Link Down Event x%x received "
+               if (phba->link_flag & LS_LOOPBACK_MODE) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1308 Link Down Event in loop back mode "
+                               "x%x received "
                                "Data: x%x x%x x%x\n",
-                               phba->brd_no, la->eventTag, phba->fc_eventTag,
-                               phba->hba_state, phba->fc_flag);
+                               la->eventTag, phba->fc_eventTag,
+                               phba->pport->port_state, vport->fc_flag);
+               }
+               else {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1305 Link Down Event x%x received "
+                               "Data: x%x x%x x%x x%x x%x\n",
+                               la->eventTag, phba->fc_eventTag,
+                               phba->pport->port_state, vport->fc_flag,
+                               la->mm, la->fa);
+               }
                lpfc_mbx_issue_link_down(phba);
        }
+       if (la->mm && la->attType == AT_LINK_UP) {
+               if (phba->link_state != LPFC_LINK_DOWN) {
+                       phba->fc_stat.LinkDown++;
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1312 Link Down Event x%x received "
+                               "Data: x%x x%x x%x\n",
+                               la->eventTag, phba->fc_eventTag,
+                               phba->pport->port_state, vport->fc_flag);
+                       lpfc_mbx_issue_link_down(phba);
+               } else
+                       lpfc_enable_la(phba);
+
+               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1310 Menlo Maint Mode Link up Event x%x rcvd "
+                               "Data: x%x x%x x%x\n",
+                               la->eventTag, phba->fc_eventTag,
+                               phba->pport->port_state, vport->fc_flag);
+               /*
+                * The cmnd that triggered this will be waiting for this
+                * signal.
+                */
+               /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
+               if (phba->wait_4_mlo_maint_flg) {
+                       phba->wait_4_mlo_maint_flg = 0;
+                       wake_up_interruptible(&phba->wait_4_mlo_m_q);
+               }
+       }
+
+       if (la->fa) {
+               if (la->mm)
+                       lpfc_issue_clear_la(phba, vport);
+               lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+                               "1311 fa %d\n", la->fa);
+       }
 
 lpfc_mbx_cmpl_read_la_free_mbuf:
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -929,30 +1304,118 @@ lpfc_mbx_cmpl_read_la_free_mbuf:
  * handed off to the SLI layer.
  */
 void
-lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       struct lpfc_sli *psli;
-       MAILBOX_t *mb;
-       struct lpfc_dmabuf *mp;
-       struct lpfc_nodelist *ndlp;
-
-       psli = &phba->sli;
-       mb = &pmb->mb;
-
-       ndlp = (struct lpfc_nodelist *) pmb->context2;
-       mp = (struct lpfc_dmabuf *) (pmb->context1);
+       struct lpfc_vport  *vport = pmb->vport;
+       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 
        pmb->context1 = NULL;
 
        /* Good status, call state machine */
-       lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+       lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
-       mempool_free( pmb, phba->mbox_mem_pool);
+       mempool_free(pmb, phba->mbox_mem_pool);
+       /* decrement the node reference count held for this callback
+        * function.
+        */
+       lpfc_nlp_put(ndlp);
 
        return;
 }
 
+static void
+lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+       MAILBOX_t *mb = &pmb->mb;
+       struct lpfc_vport *vport = pmb->vport;
+       struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+       switch (mb->mbxStatus) {
+       case 0x0011:
+       case 0x0020:
+       case 0x9700:
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+                                "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
+                                mb->mbxStatus);
+               break;
+       }
+       vport->unreg_vpi_cmpl = VPORT_OK;
+       mempool_free(pmb, phba->mbox_mem_pool);
+       /*
+        * This shost reference might have been taken at the beginning of
+        * lpfc_vport_delete()
+        */
+       if (vport->load_flag & FC_UNLOADING)
+               scsi_host_put(shost);
+}
+
+int
+lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
+{
+       struct lpfc_hba  *phba = vport->phba;
+       LPFC_MBOXQ_t *mbox;
+       int rc;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return 1;
+
+       lpfc_unreg_vpi(phba, vport->vpi, mbox);
+       mbox->vport = vport;
+       mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+       if (rc == MBX_NOT_FINISHED) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+                                "1800 Could not issue unreg_vpi\n");
+               mempool_free(mbox, phba->mbox_mem_pool);
+               vport->unreg_vpi_cmpl = VPORT_ERROR;
+               return rc;
+       }
+       return 0;
+}
+
+static void
+lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+       struct lpfc_vport *vport = pmb->vport;
+       struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+       MAILBOX_t *mb = &pmb->mb;
+
+       switch (mb->mbxStatus) {
+       case 0x0011:
+       case 0x9601:
+       case 0x9602:
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+                                "0912 cmpl_reg_vpi, mb status = 0x%x\n",
+                                mb->mbxStatus);
+               lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+               spin_lock_irq(shost->host_lock);
+               vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+               spin_unlock_irq(shost->host_lock);
+               vport->fc_myDID = 0;
+               goto out;
+       }
+
+       vport->num_disc_nodes = 0;
+       /* go thru NPR list and issue ELS PLOGIs */
+       if (vport->fc_npr_cnt)
+               lpfc_els_disc_plogi(vport);
+
+       if (!vport->num_disc_nodes) {
+               spin_lock_irq(shost->host_lock);
+               vport->fc_flag &= ~FC_NDISC_ACTIVE;
+               spin_unlock_irq(shost->host_lock);
+               lpfc_can_disctmo(vport);
+       }
+       vport->port_state = LPFC_VPORT_READY;
+
+out:
+       mempool_free(pmb, phba->mbox_mem_pool);
+       return;
+}
+
 /*
  * This routine handles processing a Fabric REG_LOGIN mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -960,88 +1423,87 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
  * handed off to the SLI layer.
  */
 void
-lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       struct lpfc_sli *psli;
-       MAILBOX_t *mb;
-       struct lpfc_dmabuf *mp;
+       struct lpfc_vport *vport = pmb->vport;
+       MAILBOX_t *mb = &pmb->mb;
+       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp;
-       struct lpfc_nodelist *ndlp_fdmi;
-
-
-       psli = &phba->sli;
-       mb = &pmb->mb;
+       struct lpfc_vport **vports;
+       int i;
 
        ndlp = (struct lpfc_nodelist *) pmb->context2;
-       mp = (struct lpfc_dmabuf *) (pmb->context1);
-
+       pmb->context1 = NULL;
+       pmb->context2 = NULL;
        if (mb->mbxStatus) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
-               mempool_free( pmb, phba->mbox_mem_pool);
-               mempool_free( ndlp, phba->nlp_mem_pool);
+               mempool_free(pmb, phba->mbox_mem_pool);
 
-               /* FLOGI failed, so just use loop map to make discovery list */
-               lpfc_disc_list_loopmap(phba);
+               if (phba->fc_topology == TOPOLOGY_LOOP) {
+                       /* FLOGI failed, use loop map to make discovery list */
+                       lpfc_disc_list_loopmap(vport);
 
-               /* Start discovery */
-               lpfc_disc_start(phba);
+                       /* Start discovery */
+                       lpfc_disc_start(vport);
+                       /* Decrement the reference count to ndlp after the
+                        * reference to the ndlp are done.
+                        */
+                       lpfc_nlp_put(ndlp);
+                       return;
+               }
+
+               lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+                                "0258 Register Fabric login error: 0x%x\n",
+                                mb->mbxStatus);
+               /* Decrement the reference count on the ndlp once all
+                * references to the ndlp are done.
+                */
+               lpfc_nlp_put(ndlp);
                return;
        }
 
-       pmb->context1 = NULL;
-
        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_type |= NLP_FABRIC;
-       ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-       lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
-
-       if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
-               /* This NPort has been assigned an NPort_ID by the fabric as a
-                * result of the completed fabric login.  Issue a State Change
-                * Registration (SCR) ELS request to the fabric controller
-                * (SCR_DID) so that this NPort gets RSCN events from the
-                * fabric.
-                */
-               lpfc_issue_els_scr(phba, SCR_DID, 0);
-
-               ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
-               if (!ndlp) {
-                       /* Allocate a new node instance. If the pool is empty,
-                        * start the discovery process and skip the Nameserver
-                        * login process.  This is attempted again later on.
-                        * Otherwise, issue a Port Login (PLOGI) to NameServer.
-                        */
-                       ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
-                       if (!ndlp) {
-                               lpfc_disc_start(phba);
-                               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                               kfree(mp);
-                               mempool_free( pmb, phba->mbox_mem_pool);
-                               return;
-                       } else {
-                               lpfc_nlp_init(phba, ndlp, NameServer_DID);
-                               ndlp->nlp_type |= NLP_FABRIC;
-                       }
-               }
-               ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-               lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-               lpfc_issue_els_plogi(phba, NameServer_DID, 0);
-               if (phba->cfg_fdmi_on) {
-                       ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
-                                                               GFP_KERNEL);
-                       if (ndlp_fdmi) {
-                               lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
-                               ndlp_fdmi->nlp_type |= NLP_FABRIC;
-                               ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
-                               lpfc_issue_els_plogi(phba, FDMI_DID, 0);
+       lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+       if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+               vports = lpfc_create_vport_work_array(phba);
+               if (vports != NULL)
+                       for(i = 0;
+                           i <= phba->max_vpi && vports[i] != NULL;
+                           i++) {
+                               if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+                                       continue;
+                               if (phba->fc_topology == TOPOLOGY_LOOP) {
+                                       lpfc_vport_set_state(vports[i],
+                                                       FC_VPORT_LINKDOWN);
+                                       continue;
+                               }
+                               if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+                                       lpfc_initial_fdisc(vports[i]);
+                               else {
+                                       lpfc_vport_set_state(vports[i],
+                                               FC_VPORT_NO_FABRIC_SUPP);
+                                       lpfc_printf_vlog(vport, KERN_ERR,
+                                                        LOG_ELS,
+                                                       "0259 No NPIV "
+                                                       "Fabric support\n");
+                               }
                        }
-               }
+               lpfc_destroy_vport_work_array(phba, vports);
+               lpfc_do_scr_ns_plogi(phba, vport);
        }
 
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
-       mempool_free( pmb, phba->mbox_mem_pool);
+       mempool_free(pmb, phba->mbox_mem_pool);
+
+       /* Drop the mbox's reference count on the ndlp at the end,
+        * after all current references to the ndlp are done.
+        */
+       lpfc_nlp_put(ndlp);
        return;
 }
 
@@ -1052,31 +1514,41 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
  * handed off to the SLI layer.
  */
 void
-lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       struct lpfc_sli *psli;
-       MAILBOX_t *mb;
-       struct lpfc_dmabuf *mp;
-       struct lpfc_nodelist *ndlp;
-
-       psli = &phba->sli;
-       mb = &pmb->mb;
-
-       ndlp = (struct lpfc_nodelist *) pmb->context2;
-       mp = (struct lpfc_dmabuf *) (pmb->context1);
+       MAILBOX_t *mb = &pmb->mb;
+       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+       struct lpfc_vport *vport = pmb->vport;
 
        if (mb->mbxStatus) {
+out:
+               /* decrement the node reference count held for this
+                * callback function.
+                */
+               lpfc_nlp_put(ndlp);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
-               mempool_free( pmb, phba->mbox_mem_pool);
-               lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+               mempool_free(pmb, phba->mbox_mem_pool);
 
-               /* RegLogin failed, so just use loop map to make discovery
-                  list */
-               lpfc_disc_list_loopmap(phba);
+               /* If no other thread is using the ndlp, free it */
+               lpfc_nlp_not_used(ndlp);
 
-               /* Start discovery */
-               lpfc_disc_start(phba);
+               if (phba->fc_topology == TOPOLOGY_LOOP) {
+                       /*
+                        * RegLogin failed, use loop map to make discovery
+                        * list
+                        */
+                       lpfc_disc_list_loopmap(vport);
+
+                       /* Start discovery */
+                       lpfc_disc_start(vport);
+                       return;
+               }
+               lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+                                "0260 Register NameServer error: 0x%x\n",
+                                mb->mbxStatus);
                return;
        }
 
@@ -1084,38 +1556,46 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 
        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_type |= NLP_FABRIC;
-       ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-       lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+       lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+       if (vport->port_state < LPFC_VPORT_READY) {
+               /* Link up discovery requires Fabric registration. */
+               lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
+               lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
+               lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+               lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+               lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
 
-       if (phba->hba_state < LPFC_HBA_READY) {
-               /* Link up discovery requires Fabrib registration. */
-               lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
-               lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
-               lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
-               lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
+               /* Issue SCR just before NameServer GID_FT Query */
+               lpfc_issue_els_scr(vport, SCR_DID, 0);
        }
 
-       phba->fc_ns_retry = 0;
+       vport->fc_ns_retry = 0;
        /* Good status, issue CT Request to NameServer */
-       if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
+       if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
                /* Cannot issue NameServer Query, so finish up discovery */
-               lpfc_disc_start(phba);
+               goto out;
        }
 
+       /* decrement the node reference count held for this
+        * callback function.
+        */
+       lpfc_nlp_put(ndlp);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
-       mempool_free( pmb, phba->mbox_mem_pool);
+       mempool_free(pmb, phba->mbox_mem_pool);
 
        return;
 }
 
 static void
-lpfc_register_remote_port(struct lpfc_hba * phba,
-                           struct lpfc_nodelist * ndlp)
+lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-       struct fc_rport *rport;
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       struct fc_rport  *rport;
        struct lpfc_rport_data *rdata;
        struct fc_rport_identifiers rport_ids;
+       struct lpfc_hba  *phba = vport->phba;
 
        /* Remote port has reappeared. Re-register w/ FC transport */
        rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
@@ -1123,8 +1603,23 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
        rport_ids.port_id = ndlp->nlp_DID;
        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
 
-       ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
-       if (!rport) {
+       /*
+        * We leave our node pointer in rport->dd_data when we unregister a
+        * FCP target port.  But fc_remote_port_add zeros the space to which
+        * rport->dd_data points.  So, if we're reusing a previously
+        * registered port, drop the reference that we took the last time we
+        * registered the port.
+        */
+       if (ndlp->rport && ndlp->rport->dd_data &&
+           ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
+               lpfc_nlp_put(ndlp);
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+               "rport add:       did:x%x flg:x%x type x%x",
+               ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+       ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
+       if (!rport || !get_device(&rport->dev)) {
                dev_printk(KERN_WARNING, &phba->pcidev->dev,
                           "Warning: fc_remote_port_add failed\n");
                return;
@@ -1134,7 +1629,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
        rport->maxframe_size = ndlp->nlp_maxframe;
        rport->supported_classes = ndlp->nlp_class_sup;
        rdata = rport->dd_data;
-       rdata->pnode = ndlp;
+       rdata->pnode = lpfc_nlp_get(ndlp);
 
        if (ndlp->nlp_type & NLP_FCP_TARGET)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
@@ -1146,254 +1641,357 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
                fc_remote_port_rolechg(rport, rport_ids.roles);
 
        if ((rport->scsi_target_id != -1) &&
-               (rport->scsi_target_id < LPFC_MAX_TARGET)) {
+           (rport->scsi_target_id < LPFC_MAX_TARGET)) {
                ndlp->nlp_sid = rport->scsi_target_id;
        }
-
        return;
 }
 
 static void
-lpfc_unregister_remote_port(struct lpfc_hba * phba,
-                           struct lpfc_nodelist * ndlp)
+lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
 {
        struct fc_rport *rport = ndlp->rport;
-       struct lpfc_rport_data *rdata = rport->dd_data;
 
-       if (rport->scsi_target_id == -1) {
-               ndlp->rport = NULL;
-               rdata->pnode = NULL;
-       }
+       lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+               "rport delete:    did:x%x flg:x%x type x%x",
+               ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
 
        fc_remote_port_delete(rport);
 
        return;
 }
 
-int
-lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
+static void
+lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
 {
-       enum { none, unmapped, mapped } rport_add = none, rport_del = none;
-       struct lpfc_sli      *psli;
-
-       psli = &phba->sli;
-       /* Sanity check to ensure we are not moving to / from the same list */
-       if ((nlp->nlp_flag & NLP_LIST_MASK) == list)
-               if (list != NLP_NO_LIST)
-                       return 0;
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-       spin_lock_irq(phba->host->host_lock);
-       switch (nlp->nlp_flag & NLP_LIST_MASK) {
-       case NLP_NO_LIST: /* Not on any list */
+       spin_lock_irq(shost->host_lock);
+       switch (state) {
+       case NLP_STE_UNUSED_NODE:
+               vport->fc_unused_cnt += count;
                break;
-       case NLP_UNUSED_LIST:
-               phba->fc_unused_cnt--;
-               list_del(&nlp->nlp_listp);
+       case NLP_STE_PLOGI_ISSUE:
+               vport->fc_plogi_cnt += count;
                break;
-       case NLP_PLOGI_LIST:
-               phba->fc_plogi_cnt--;
-               list_del(&nlp->nlp_listp);
+       case NLP_STE_ADISC_ISSUE:
+               vport->fc_adisc_cnt += count;
                break;
-       case NLP_ADISC_LIST:
-               phba->fc_adisc_cnt--;
-               list_del(&nlp->nlp_listp);
+       case NLP_STE_REG_LOGIN_ISSUE:
+               vport->fc_reglogin_cnt += count;
                break;
-       case NLP_REGLOGIN_LIST:
-               phba->fc_reglogin_cnt--;
-               list_del(&nlp->nlp_listp);
+       case NLP_STE_PRLI_ISSUE:
+               vport->fc_prli_cnt += count;
                break;
-       case NLP_PRLI_LIST:
-               phba->fc_prli_cnt--;
-               list_del(&nlp->nlp_listp);
+       case NLP_STE_UNMAPPED_NODE:
+               vport->fc_unmap_cnt += count;
                break;
-       case NLP_UNMAPPED_LIST:
-               phba->fc_unmap_cnt--;
-               list_del(&nlp->nlp_listp);
-               nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
-               nlp->nlp_type &= ~NLP_FC_NODE;
-               phba->nport_event_cnt++;
-               if (nlp->rport)
-                       rport_del = unmapped;
+       case NLP_STE_MAPPED_NODE:
+               vport->fc_map_cnt += count;
                break;
-       case NLP_MAPPED_LIST:
-               phba->fc_map_cnt--;
-               list_del(&nlp->nlp_listp);
-               phba->nport_event_cnt++;
-               if (nlp->rport)
-                       rport_del = mapped;
-               break;
-       case NLP_NPR_LIST:
-               phba->fc_npr_cnt--;
-               list_del(&nlp->nlp_listp);
-               /* Stop delay tmo if taking node off NPR list */
-               if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
-                  (list != NLP_NPR_LIST)) {
-                       spin_unlock_irq(phba->host->host_lock);
-                       lpfc_cancel_retry_delay_tmo(phba, nlp);
-                       spin_lock_irq(phba->host->host_lock);
-               }
+       case NLP_STE_NPR_NODE:
+               vport->fc_npr_cnt += count;
                break;
        }
+       spin_unlock_irq(shost->host_lock);
+}
 
-       nlp->nlp_flag &= ~NLP_LIST_MASK;
-
-       /* Add NPort <did> to <num> list */
-       lpfc_printf_log(phba,
-                       KERN_INFO,
-                       LOG_NODE,
-                       "%d:0904 Add NPort x%x to %d list Data: x%x\n",
-                       phba->brd_no,
-                       nlp->nlp_DID, list, nlp->nlp_flag);
+static void
+lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                      int old_state, int new_state)
+{
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-       switch (list) {
-       case NLP_NO_LIST: /* No list, just remove it */
-               spin_unlock_irq(phba->host->host_lock);
-               lpfc_nlp_remove(phba, nlp);
-               spin_lock_irq(phba->host->host_lock);
-               /* as node removed - stop further transport calls */
-               rport_del = none;
-               break;
-       case NLP_UNUSED_LIST:
-               nlp->nlp_flag |= list;
-               /* Put it at the end of the unused list */
-               list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
-               phba->fc_unused_cnt++;
-               break;
-       case NLP_PLOGI_LIST:
-               nlp->nlp_flag |= list;
-               /* Put it at the end of the plogi list */
-               list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
-               phba->fc_plogi_cnt++;
-               break;
-       case NLP_ADISC_LIST:
-               nlp->nlp_flag |= list;
-               /* Put it at the end of the adisc list */
-               list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
-               phba->fc_adisc_cnt++;
-               break;
-       case NLP_REGLOGIN_LIST:
-               nlp->nlp_flag |= list;
-               /* Put it at the end of the reglogin list */
-               list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
-               phba->fc_reglogin_cnt++;
-               break;
-       case NLP_PRLI_LIST:
-               nlp->nlp_flag |= list;
-               /* Put it at the end of the prli list */
-               list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
-               phba->fc_prli_cnt++;
-               break;
-       case NLP_UNMAPPED_LIST:
-               rport_add = unmapped;
-               /* ensure all vestiges of "mapped" significance are gone */
-               nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
-               nlp->nlp_flag |= list;
-               /* Put it at the end of the unmap list */
-               list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
-               phba->fc_unmap_cnt++;
-               phba->nport_event_cnt++;
-               nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
-               nlp->nlp_type |= NLP_FC_NODE;
-               break;
-       case NLP_MAPPED_LIST:
-               rport_add = mapped;
-               nlp->nlp_flag |= list;
-               /* Put it at the end of the map list */
-               list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
-               phba->fc_map_cnt++;
-               phba->nport_event_cnt++;
-               nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
-               break;
-       case NLP_NPR_LIST:
-               nlp->nlp_flag |= list;
-               /* Put it at the end of the npr list */
-               list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
-               phba->fc_npr_cnt++;
+       if (new_state == NLP_STE_UNMAPPED_NODE) {
+               ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+               ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+               ndlp->nlp_type |= NLP_FC_NODE;
+       }
+       if (new_state == NLP_STE_MAPPED_NODE)
+               ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+       if (new_state == NLP_STE_NPR_NODE)
+               ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
 
-               nlp->nlp_flag &= ~NLP_RCV_PLOGI;
-               break;
-       case NLP_JUST_DQ:
-               break;
+       /* Transport interface */
+       if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
+                           old_state == NLP_STE_UNMAPPED_NODE)) {
+               vport->phba->nport_event_cnt++;
+               lpfc_unregister_remote_port(ndlp);
        }
 
-       spin_unlock_irq(phba->host->host_lock);
+       if (new_state ==  NLP_STE_MAPPED_NODE ||
+           new_state == NLP_STE_UNMAPPED_NODE) {
+               vport->phba->nport_event_cnt++;
+               /*
+                * Tell the fc transport about the port, if we haven't
+                * already. If we have, and it's a scsi entity, be
+                * sure to unblock any attached scsi devices
+                */
+               lpfc_register_remote_port(vport, ndlp);
+       }
+       if ((new_state ==  NLP_STE_MAPPED_NODE) &&
+               (vport->stat_data_enabled)) {
+               /*
+                * A new target is discovered, if there is no buffer for
+                * statistical data collection allocate buffer.
+                */
+               ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+                                        sizeof(struct lpfc_scsicmd_bkt),
+                                        GFP_KERNEL);
 
+               if (!ndlp->lat_data)
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+                               "0286 lpfc_nlp_state_cleanup failed to "
+                               "allocate statistical data buffer DID "
+                               "0x%x\n", ndlp->nlp_DID);
+       }
        /*
-        * We make all the calls into the transport after we have
-        * moved the node between lists. This so that we don't
-        * release the lock while in-between lists.
+        * if we added to Mapped list, but the remote port
+        * registration failed or assigned a target id outside
+        * our presentable range - move the node to the
+        * Unmapped List
         */
+       if (new_state == NLP_STE_MAPPED_NODE &&
+           (!ndlp->rport ||
+            ndlp->rport->scsi_target_id == -1 ||
+            ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
+               spin_lock_irq(shost->host_lock);
+               ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
+               spin_unlock_irq(shost->host_lock);
+               lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+       }
+}
 
-       /* Don't upcall midlayer if we're unloading */
-       if (!(phba->fc_flag & FC_UNLOADING)) {
-               /*
-                * We revalidate the rport pointer as the "add" function
-                * may have removed the remote port.
-                */
-               if ((rport_del != none) && nlp->rport)
-                       lpfc_unregister_remote_port(phba, nlp);
+/*
+ * lpfc_nlp_state_name: Map an NLP state number to a printable name.
+ * Copies the state's name into @buffer (at most @size bytes) or formats
+ * "unknown (<n>)" for out-of-range values.  Returns @buffer so the call
+ * can be used directly inside a log-message argument list.
+ */
+static char *
+lpfc_nlp_state_name(char *buffer, size_t size, int state)
+{
+       static char *states[] = {
+               [NLP_STE_UNUSED_NODE] = "UNUSED",
+               [NLP_STE_PLOGI_ISSUE] = "PLOGI",
+               [NLP_STE_ADISC_ISSUE] = "ADISC",
+               [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
+               [NLP_STE_PRLI_ISSUE] = "PRLI",
+               [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
+               [NLP_STE_MAPPED_NODE] = "MAPPED",
+               [NLP_STE_NPR_NODE] = "NPR",
+       };
+
+       /* Designated initializers may leave NULL holes; check both bounds
+        * and the table entry before using it.
+        */
+       if (state < NLP_STE_MAX_STATE && states[state])
+               strlcpy(buffer, states[state], size);
+       else
+               snprintf(buffer, size, "unknown (%d)", state);
+       return buffer;
+}
 
-               if (rport_add != none) {
-                       /*
-                        * Tell the fc transport about the port, if we haven't
-                        * already. If we have, and it's a scsi entity, be
-                        * sure to unblock any attached scsi devices
-                        */
-                       if ((!nlp->rport) || (nlp->rport->port_state ==
-                                       FC_PORTSTATE_BLOCKED))
-                               lpfc_register_remote_port(phba, nlp);
+/*
+ * lpfc_nlp_set_state: Transition @ndlp to discovery state @state on @vport.
+ * Logs and traces the transition, cancels a pending retry-delay timer when
+ * leaving NPR, clears unmapped-node bookkeeping when leaving UNMAPPED,
+ * links the node onto vport->fc_nodes on its first transition (otherwise
+ * decrements the old state's counter), then bumps the new state's counter
+ * and lets lpfc_nlp_state_cleanup apply the transport-level side effects.
+ */
+void
+lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                  int state)
+{
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       int  old_state = ndlp->nlp_state;
+       char name1[16], name2[16];
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+                        "0904 NPort state transition x%06x, %s -> %s\n",
+                        ndlp->nlp_DID,
+                        lpfc_nlp_state_name(name1, sizeof(name1), old_state),
+                        lpfc_nlp_state_name(name2, sizeof(name2), state));
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+               "node statechg    did:x%x old:%d ste:%d",
+               ndlp->nlp_DID, old_state, state);
+
+       /* A node leaving NPR no longer needs its delayed-retry timer */
+       if (old_state == NLP_STE_NPR_NODE &&
+           state != NLP_STE_NPR_NODE)
+               lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       if (old_state == NLP_STE_UNMAPPED_NODE) {
+               ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
+               ndlp->nlp_type &= ~NLP_FC_NODE;
+       }
+
+       /* First transition: put the node on the vport list.  Otherwise the
+        * old state's counter must come down before the new one goes up.
+        */
+       if (list_empty(&ndlp->nlp_listp)) {
+               spin_lock_irq(shost->host_lock);
+               list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+               spin_unlock_irq(shost->host_lock);
+       } else if (old_state)
+               lpfc_nlp_counters(vport, old_state, -1);
+
+       ndlp->nlp_state = state;
+       lpfc_nlp_counters(vport, state, 1);
+       lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
+}
 
-                       /*
-                        * if we added to Mapped list, but the remote port
-                        * registration failed or assigned a target id outside
-                        * our presentable range - move the node to the
-                        * Unmapped List
-                        */
-                       if ((rport_add == mapped) &&
-                           ((!nlp->rport) ||
-                            (nlp->rport->scsi_target_id == -1) ||
-                            (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) {
-                               nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-                               spin_lock_irq(phba->host->host_lock);
-                               nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
-                               spin_unlock_irq(phba->host->host_lock);
-                               lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
-                       }
-               }
+/*
+ * lpfc_enqueue_node: Link @ndlp onto the vport's fc_nodes list if it is
+ * not already linked anywhere.  List manipulation is done under the SCSI
+ * host lock; no per-state counters are touched here.
+ */
+void
+lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+       if (list_empty(&ndlp->nlp_listp)) {
+               spin_lock_irq(shost->host_lock);
+               list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+               spin_unlock_irq(shost->host_lock);
        }
-       return 0;
+}
+
+/*
+ * lpfc_dequeue_node: Remove @ndlp from the vport node list.
+ * Cancels the retry-delay timer, decrements the counter for the node's
+ * current state (only when the node is actually listed), unlinks it
+ * under the host lock, then performs UNUSED-state transport cleanup.
+ */
+void
+lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+               lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+       spin_lock_irq(shost->host_lock);
+       list_del_init(&ndlp->nlp_listp);
+       spin_unlock_irq(shost->host_lock);
+       lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+                               NLP_STE_UNUSED_NODE);
+}
+
+/*
+ * lpfc_disable_node: Take @ndlp out of service without unlinking it.
+ * Same timer-cancel, counter and cleanup steps as lpfc_dequeue_node,
+ * but the node is deliberately left on the vport list.
+ */
+static void
+lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+               lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+       lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+                               NLP_STE_UNUSED_NODE);
+}
+/**
+ * lpfc_initialize_node: Initialize all fields of node object.
+ * @vport: Pointer to Virtual Port object.
+ * @ndlp: Pointer to FC node object.
+ * @did: FC_ID of the node.
+ *     This function is always called when node object need to
+ * be initialized. It initializes all the fields of the node
+ * object.
+ **/
+static inline void
+lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+       uint32_t did)
+{
+       INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+       INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+       /* Delay timer used by ELS retry logic; fires lpfc_els_retry_delay
+        * with the node itself as the callback argument.
+        */
+       init_timer(&ndlp->nlp_delayfunc);
+       ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+       ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+       ndlp->nlp_DID = did;
+       ndlp->vport = vport;
+       ndlp->nlp_sid = NLP_NO_SID;
+       /* Restart reference counting and mark the node active */
+       kref_init(&ndlp->kref);
+       NLP_INT_NODE_ACT(ndlp);
+       atomic_set(&ndlp->cmd_pending, 0);
+       ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+}
+
+/*
+ * lpfc_enable_node: Reactivate an inactive @ndlp for reuse on @vport.
+ * Under phba->ndlp_lock the node is rejected (NULL return plus a
+ * warning) if it is marked for free or already active; otherwise every
+ * field except the list linkage is zeroed and reinitialized with the
+ * original DID.  A @state other than UNUSED also transitions the node.
+ * NOTE(review): the memset assumes the list_head linkage is the first
+ * member of struct lpfc_nodelist - confirm against the struct layout.
+ */
+struct lpfc_nodelist *
+lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                int state)
+{
+       struct lpfc_hba *phba = vport->phba;
+       uint32_t did;
+       unsigned long flags;
+
+       if (!ndlp)
+               return NULL;
+
+       spin_lock_irqsave(&phba->ndlp_lock, flags);
+       /* The ndlp should not be in memory free mode */
+       if (NLP_CHK_FREE_REQ(ndlp)) {
+               spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+                               "0277 lpfc_enable_node: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               return NULL;
+       }
+       /* The ndlp should not already be in active mode */
+       if (NLP_CHK_NODE_ACT(ndlp)) {
+               spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+                               "0278 lpfc_enable_node: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               return NULL;
+       }
+
+       /* Keep the original DID */
+       did = ndlp->nlp_DID;
+
+       /* re-initialize ndlp except of ndlp linked list pointer */
+       memset((((char *)ndlp) + sizeof (struct list_head)), 0,
+               sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
+       lpfc_initialize_node(vport, ndlp, did);
+
+       spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+
+       if (state != NLP_STE_UNUSED_NODE)
+               lpfc_nlp_set_state(vport, ndlp, state);
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+               "node enable:       did:x%x",
+               ndlp->nlp_DID, 0, 0);
+       return ndlp;
+}
+
+/*
+ * lpfc_drop_node: Move @ndlp to UNUSED and drop the "last" reference.
+ */
+void
+lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+       /*
+        * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
+        * be used if we wish to issue the "last" lpfc_nlp_put() to remove
+        * the ndlp from the vport. The ndlp marked as UNUSED on the list
+        * until ALL other outstanding threads have completed. We check
+        * that the ndlp not already in the UNUSED state before we proceed.
+        */
+       if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+               return;
+       lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+       lpfc_nlp_put(ndlp);
+       return;
 }
 
 /*
  * Start / ReStart rescue timer for Discovery / RSCN handling
  */
 void
-lpfc_set_disctmo(struct lpfc_hba * phba)
+lpfc_set_disctmo(struct lpfc_vport *vport)
 {
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_hba  *phba = vport->phba;
        uint32_t tmo;
 
-       if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
-               /* For FAN, timeout should be greater then edtov */
+       if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
+               /* For FAN, timeout should be greater than edtov */
                tmo = (((phba->fc_edtov + 999) / 1000) + 1);
        } else {
-               /* Normal discovery timeout should be > then ELS/CT timeout
+               /* Normal discovery timeout should be > than ELS/CT timeout
                 * FC spec states we need 3 * ratov for CT requests
                 */
                tmo = ((phba->fc_ratov * 3) + 3);
        }
 
-       mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
-       spin_lock_irq(phba->host->host_lock);
-       phba->fc_flag |= FC_DISC_TMO;
-       spin_unlock_irq(phba->host->host_lock);
+
+       /* Trace only a fresh arm; a pending timer means this is a restart */
+       if (!timer_pending(&vport->fc_disctmo)) {
+               lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+                       "set disc timer:  tmo:x%x state:x%x flg:x%x",
+                       tmo, vport->port_state, vport->fc_flag);
+       }
+
+       mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
+       spin_lock_irq(shost->host_lock);
+       vport->fc_flag |= FC_DISC_TMO;
+       spin_unlock_irq(shost->host_lock);
 
        /* Start Discovery Timer state <hba_state> */
-       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-                       "%d:0247 Start Discovery Timer state x%x "
-                       "Data: x%x x%lx x%x x%x\n",
-                       phba->brd_no,
-                       phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
-                       phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+                        "0247 Start Discovery Timer state x%x "
+                        "Data: x%x x%lx x%x x%x\n",
+                        vport->port_state, tmo,
+                        (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
+                        vport->fc_adisc_cnt);
 
        return;
 }
@@ -1402,24 +2000,32 @@ lpfc_set_disctmo(struct lpfc_hba * phba)
  * Cancel rescue timer for Discovery / RSCN handling
  */
 int
-lpfc_can_disctmo(struct lpfc_hba * phba)
+lpfc_can_disctmo(struct lpfc_vport *vport)
 {
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       unsigned long iflags;
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+               "can disc timer:  state:x%x rtry:x%x flg:x%x",
+               vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
        /* Turn off discovery timer if its running */
-       if (phba->fc_flag & FC_DISC_TMO) {
-               spin_lock_irq(phba->host->host_lock);
-               phba->fc_flag &= ~FC_DISC_TMO;
-               spin_unlock_irq(phba->host->host_lock);
-               del_timer_sync(&phba->fc_disctmo);
-               phba->work_hba_events &= ~WORKER_DISC_TMO;
+       if (vport->fc_flag & FC_DISC_TMO) {
+               spin_lock_irqsave(shost->host_lock, iflags);
+               vport->fc_flag &= ~FC_DISC_TMO;
+               spin_unlock_irqrestore(shost->host_lock, iflags);
+               del_timer_sync(&vport->fc_disctmo);
+               /* Also clear a timeout event already queued for the worker */
+               spin_lock_irqsave(&vport->work_port_lock, iflags);
+               vport->work_port_events &= ~WORKER_DISC_TMO;
+               spin_unlock_irqrestore(&vport->work_port_lock, iflags);
        }
 
        /* Cancel Discovery Timer state <hba_state> */
-       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-                       "%d:0248 Cancel Discovery Timer state x%x "
-                       "Data: x%x x%x x%x\n",
-                       phba->brd_no, phba->hba_state, phba->fc_flag,
-                       phba->fc_plogi_cnt, phba->fc_adisc_cnt);
-
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+                        "0248 Cancel Discovery Timer state x%x "
+                        "Data: x%x x%x x%x\n",
+                        vport->port_state, vport->fc_flag,
+                        vport->fc_plogi_cnt, vport->fc_adisc_cnt);
        return 0;
 }
 
@@ -1428,15 +2034,18 @@ lpfc_can_disctmo(struct lpfc_hba * phba)
  * Return true if iocb matches the specified nport
  */
 int
-lpfc_check_sli_ndlp(struct lpfc_hba * phba,
-                   struct lpfc_sli_ring * pring,
-                   struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
+lpfc_check_sli_ndlp(struct lpfc_hba *phba,
+                   struct lpfc_sli_ring *pring,
+                   struct lpfc_iocbq *iocb,
+                   struct lpfc_nodelist *ndlp)
 {
-       struct lpfc_sli *psli;
-       IOCB_t *icmd;
+       struct lpfc_sli *psli = &phba->sli;
+       IOCB_t *icmd = &iocb->iocb;
+       struct lpfc_vport    *vport = ndlp->vport;
+
+       if (iocb->vport != vport)
+               return 0;
 
-       psli = &phba->sli;
-       icmd = &iocb->iocb;
        if (pring->ringno == LPFC_ELS_RING) {
                switch (icmd->ulpCommand) {
                case CMD_GEN_REQUEST64_CR:
@@ -1454,7 +2063,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
        } else if (pring->ringno == psli->fcp_ring) {
                /* Skip match check if waiting to relogin to FCP target */
                if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
-                 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
+                   (ndlp->nlp_flag & NLP_DELAY_TMO)) {
                        return 0;
                }
                if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
@@ -1471,7 +2080,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
  * associated with nlp_rpi in the LPFC_NODELIST entry.
  */
 static int
-lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        LIST_HEAD(completions);
        struct lpfc_sli *psli;
@@ -1480,6 +2089,8 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
        IOCB_t *icmd;
        uint32_t rpi, i;
 
+       lpfc_fabric_abort_nport(ndlp);
+
        /*
         * Everything that matches on txcmplq will be returned
         * by firmware with a no rpi error.
@@ -1491,15 +2102,15 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->ring[i];
 
-                       spin_lock_irq(phba->host->host_lock);
+                       spin_lock_irq(&phba->hbalock);
                        list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
-                                               list) {
+                                                list) {
                                /*
                                 * Check to see if iocb matches the nport we are
                                 * looking for
                                 */
-                               if ((lpfc_check_sli_ndlp
-                                    (phba, pring, iocb, ndlp))) {
+                               if ((lpfc_check_sli_ndlp(phba, pring, iocb,
+                                                        ndlp))) {
                                        /* It matches, so deque and call compl
                                           with an error */
                                        list_move_tail(&iocb->list,
@@ -1507,22 +2118,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
                                        pring->txq_cnt--;
                                }
                        }
-                       spin_unlock_irq(phba->host->host_lock);
-
+                       spin_unlock_irq(&phba->hbalock);
                }
        }
 
        while (!list_empty(&completions)) {
                iocb = list_get_first(&completions, struct lpfc_iocbq, list);
-               list_del(&iocb->list);
+               list_del_init(&iocb->list);
 
-               if (iocb->iocb_cmpl) {
+               if (!iocb->iocb_cmpl)
+                       lpfc_sli_release_iocbq(phba, iocb);
+               else {
                        icmd = &iocb->iocb;
                        icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-                       (iocb->iocb_cmpl) (phba, iocb, iocb);
-               } else
-                       lpfc_sli_release_iocbq(phba, iocb);
+                       (iocb->iocb_cmpl)(phba, iocb, iocb);
+               }
        }
 
        return 0;
@@ -1538,25 +2149,70 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
  * we are waiting to PLOGI back to the remote NPort.
  */
 int
-lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-       LPFC_MBOXQ_t *mbox;
+       struct lpfc_hba *phba = vport->phba;
+       LPFC_MBOXQ_t    *mbox;
        int rc;
 
        if (ndlp->nlp_rpi) {
-               if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
-                       lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
-                       mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
-                       rc = lpfc_sli_issue_mbox
-                                   (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+               mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (mbox) {
+                       lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
+                       mbox->vport = vport;
+                       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+                       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                        if (rc == MBX_NOT_FINISHED)
-                               mempool_free( mbox, phba->mbox_mem_pool);
+                               mempool_free(mbox, phba->mbox_mem_pool);
                }
+               /* Complete/abort any IOCBs still outstanding for this rpi */
                lpfc_no_rpi(phba, ndlp);
                ndlp->nlp_rpi = 0;
                return 1;
        }
-       return 0;
+       return 0;
+}
+
+/*
+ * lpfc_unreg_all_rpis: Issue a single UNREG_LOGIN for @vport with rpi
+ * 0xffff (presumably the all-RPIs wildcard - confirm against firmware
+ * documentation) and wait for the mailbox to complete.  Allocation or
+ * issue failures are handled by freeing the mailbox; no status is
+ * returned to the caller.
+ */
+void
+lpfc_unreg_all_rpis(struct lpfc_vport *vport)
+{
+       struct lpfc_hba  *phba  = vport->phba;
+       LPFC_MBOXQ_t     *mbox;
+       int rc;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (mbox) {
+               lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
+               mbox->vport = vport;
+               mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+               mbox->context1 = NULL;
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+               if (rc == MBX_NOT_FINISHED) {
+                       mempool_free(mbox, phba->mbox_mem_pool);
+               }
+       }
+}
+
+/*
+ * lpfc_unreg_default_rpis: Issue UNREG_DID with DID 0xffffffff for
+ * @vport (presumably clears firmware-assigned default rpis - confirm)
+ * and wait for completion, logging an error if the mailbox could not
+ * be issued.
+ */
+void
+lpfc_unreg_default_rpis(struct lpfc_vport *vport)
+{
+       struct lpfc_hba  *phba  = vport->phba;
+       LPFC_MBOXQ_t     *mbox;
+       int rc;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (mbox) {
+               lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
+               mbox->vport = vport;
+               mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+               mbox->context1 = NULL;
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+               if (rc == MBX_NOT_FINISHED) {
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+                                        "1815 Could not issue "
+                                        "unreg_did (default rpis)\n");
+                       mempool_free(mbox, phba->mbox_mem_pool);
+               }
+       }
 }
 
 /*
@@ -1564,20 +2220,34 @@ lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
  * so it can be freed.
  */
 static int
-lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-       LPFC_MBOXQ_t       *mb;
-       LPFC_MBOXQ_t       *nextmb;
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_hba  *phba = vport->phba;
+       LPFC_MBOXQ_t *mb, *nextmb;
        struct lpfc_dmabuf *mp;
 
        /* Cleanup node for NPort <nlp_DID> */
-       lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                       "%d:0900 Cleanup node for NPort x%x "
-                       "Data: x%x x%x x%x\n",
-                       phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
-                       ndlp->nlp_state, ndlp->nlp_rpi);
-
-       lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+                        "0900 Cleanup node for NPort x%x "
+                        "Data: x%x x%x x%x\n",
+                        ndlp->nlp_DID, ndlp->nlp_flag,
+                        ndlp->nlp_state, ndlp->nlp_rpi);
+       if (NLP_CHK_FREE_REQ(ndlp)) {
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+                               "0280 lpfc_cleanup_node: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               lpfc_dequeue_node(vport, ndlp);
+       } else {
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+                               "0281 lpfc_cleanup_node: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               lpfc_disable_node(vport, ndlp);
+       }
 
        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
        if ((mb = phba->sli.mbox_active)) {
@@ -1588,33 +2258,38 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
                }
        }
 
-       spin_lock_irq(phba->host->host_lock);
+       spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
                if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
-                  (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+                   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mp = (struct lpfc_dmabuf *) (mb->context1);
                        if (mp) {
-                               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+                               __lpfc_mbuf_free(phba, mp->virt, mp->phys);
                                kfree(mp);
                        }
                        list_del(&mb->list);
                        mempool_free(mb, phba->mbox_mem_pool);
+                       /* We shall not invoke the lpfc_nlp_put to decrement
+                        * the ndlp reference count as we are in the process
+                        * of lpfc_nlp_release.
+                        */
                }
        }
-       spin_unlock_irq(phba->host->host_lock);
+       spin_unlock_irq(&phba->hbalock);
 
-       lpfc_els_abort(phba,ndlp);
-       spin_lock_irq(phba->host->host_lock);
+       lpfc_els_abort(phba, ndlp);
+
+       spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-       spin_unlock_irq(phba->host->host_lock);
+       spin_unlock_irq(shost->host_lock);
 
        ndlp->nlp_last_elscmd = 0;
        del_timer_sync(&ndlp->nlp_delayfunc);
 
-       if (!list_empty(&ndlp->els_retry_evt.evt_listp))
-               list_del_init(&ndlp->els_retry_evt.evt_listp);
+       list_del_init(&ndlp->els_retry_evt.evt_listp);
+       list_del_init(&ndlp->dev_loss_evt.evt_listp);
 
-       lpfc_unreg_rpi(phba, ndlp);
+       lpfc_unreg_rpi(vport, ndlp);
 
        return 0;
 }
@@ -1624,53 +2299,67 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
  * If we are in the middle of using the nlp in the discovery state
  * machine, defer the free till we reach the end of the state machine.
  */
+/*
+ * lpfc_nlp_remove: Release all resources associated with @ndlp.
+ * When removal was deferred (NLP_DEFER_RM) and no rpi was ever assigned,
+ * first issue a REG_LOGIN flagged LPFC_MBX_IMED_UNREG so the firmware's
+ * default rpi gets cleaned up; then run lpfc_cleanup_node and sever any
+ * remaining rport back-pointer so no dangling reference survives.
+ */
-int
-lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+static void
+lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
+       struct lpfc_hba  *phba = vport->phba;
        struct lpfc_rport_data *rdata;
+       LPFC_MBOXQ_t *mbox;
+       int rc;
 
-       if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-               lpfc_cancel_retry_delay_tmo(phba, ndlp);
-       }
-
-       if (ndlp->nlp_disc_refcnt) {
-               spin_lock_irq(phba->host->host_lock);
-               ndlp->nlp_flag |= NLP_DELAY_REMOVE;
-               spin_unlock_irq(phba->host->host_lock);
-       } else {
-               lpfc_freenode(phba, ndlp);
-
-               if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
-                       rdata = ndlp->rport->dd_data;
-                       rdata->pnode = NULL;
-                       ndlp->rport = NULL;
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
+               /* For this case we need to cleanup the default rpi
+                * allocated by the firmware.
+                */
+               if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
+                       != NULL) {
+                       rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
+                           (uint8_t *) &vport->fc_sparam, mbox, 0);
+                       if (rc) {
+                               mempool_free(mbox, phba->mbox_mem_pool);
+                       }
+                       else {
+                               mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+                               mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+                               mbox->vport = vport;
+                               mbox->context2 = NULL;
+                               rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+                               if (rc == MBX_NOT_FINISHED) {
+                                       mempool_free(mbox, phba->mbox_mem_pool);
+                               }
+                       }
                }
+       }
+       lpfc_cleanup_node(vport, ndlp);
 
-               mempool_free( ndlp, phba->nlp_mem_pool);
+       /*
+        * We can get here with a non-NULL ndlp->rport because when we
+        * unregister a rport we don't break the rport/node linkage.  So if we
+        * do, make sure we don't leaving any dangling pointers behind.
+        */
+       if (ndlp->rport) {
+               rdata = ndlp->rport->dd_data;
+               rdata->pnode = NULL;
+               ndlp->rport = NULL;
        }
-       return 0;
 }
 
 static int
-lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
+lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+             uint32_t did)
 {
-       D_ID mydid;
-       D_ID ndlpdid;
-       D_ID matchdid;
+       D_ID mydid, ndlpdid, matchdid;
 
        if (did == Bcast_DID)
                return 0;
 
-       if (ndlp->nlp_DID == 0) {
-               return 0;
-       }
-
        /* First check for Direct match */
        if (ndlp->nlp_DID == did)
                return 1;
 
        /* Next check for area/domain identically equals 0 match */
-       mydid.un.word = phba->fc_myDID;
+       mydid.un.word = vport->fc_myDID;
        if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
                return 0;
        }
@@ -1701,126 +2390,134 @@ lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
        return 0;
 }
 
-/* Search for a nodelist entry on a specific list */
-struct lpfc_nodelist *
-lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
+/* Search for a nodelist entry */
+static struct lpfc_nodelist *
+__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
 {
        struct lpfc_nodelist *ndlp;
-       struct list_head *lists[]={&phba->fc_nlpunmap_list,
-                                  &phba->fc_nlpmap_list,
-                                  &phba->fc_plogi_list,
-                                  &phba->fc_adisc_list,
-                                  &phba->fc_reglogin_list,
-                                  &phba->fc_prli_list,
-                                  &phba->fc_npr_list,
-                                  &phba->fc_unused_list};
-       uint32_t search[]={NLP_SEARCH_UNMAPPED,
-                          NLP_SEARCH_MAPPED,
-                          NLP_SEARCH_PLOGI,
-                          NLP_SEARCH_ADISC,
-                          NLP_SEARCH_REGLOGIN,
-                          NLP_SEARCH_PRLI,
-                          NLP_SEARCH_NPR,
-                          NLP_SEARCH_UNUSED};
-       int i;
        uint32_t data1;
 
-       spin_lock_irq(phba->host->host_lock);
-       for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
-               if (!(order & search[i]))
-                       continue;
-               list_for_each_entry(ndlp, lists[i], nlp_listp) {
-                       if (lpfc_matchdid(phba, ndlp, did)) {
-                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
-                                        ((uint32_t) ndlp->nlp_xri << 16) |
-                                        ((uint32_t) ndlp->nlp_type << 8) |
-                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
-                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                               "%d:0929 FIND node DID "
-                                               " Data: x%p x%x x%x x%x\n",
-                                               phba->brd_no,
-                                               ndlp, ndlp->nlp_DID,
-                                               ndlp->nlp_flag, data1);
-                               spin_unlock_irq(phba->host->host_lock);
-                               return ndlp;
-                       }
+       list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+               if (lpfc_matchdid(vport, ndlp, did)) {
+                       data1 = (((uint32_t) ndlp->nlp_state << 24) |
+                                ((uint32_t) ndlp->nlp_xri << 16) |
+                                ((uint32_t) ndlp->nlp_type << 8) |
+                                ((uint32_t) ndlp->nlp_rpi & 0xff));
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+                                        "0929 FIND node DID "
+                                        "Data: x%p x%x x%x x%x\n",
+                                        ndlp, ndlp->nlp_DID,
+                                        ndlp->nlp_flag, data1);
+                       return ndlp;
                }
        }
-       spin_unlock_irq(phba->host->host_lock);
 
        /* FIND node did <did> NOT FOUND */
-       lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                       "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
-                       phba->brd_no, did, order);
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+                        "0932 FIND node did x%x NOT FOUND.\n", did);
        return NULL;
 }
 
 struct lpfc_nodelist *
-lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
+lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
 {
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;
-       uint32_t flg;
 
-       ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
+       spin_lock_irq(shost->host_lock);
+       ndlp = __lpfc_findnode_did(vport, did);
+       spin_unlock_irq(shost->host_lock);
+       return ndlp;
+}
+
+struct lpfc_nodelist *
+lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+{
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_nodelist *ndlp;
+
+       ndlp = lpfc_findnode_did(vport, did);
        if (!ndlp) {
-               if ((phba->fc_flag & FC_RSCN_MODE) &&
-                  ((lpfc_rscn_payload_check(phba, did) == 0)))
+               if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
+                   lpfc_rscn_payload_check(vport, did) == 0)
                        return NULL;
                ndlp = (struct lpfc_nodelist *)
-                    mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+                    mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
+               if (!ndlp)
+                       return NULL;
+               lpfc_nlp_init(vport, ndlp, did);
+               lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+               spin_lock_irq(shost->host_lock);
+               ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+               spin_unlock_irq(shost->host_lock);
+               return ndlp;
+       } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+               ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
                if (!ndlp)
                        return NULL;
-               lpfc_nlp_init(phba, ndlp, did);
-               ndlp->nlp_state = NLP_STE_NPR_NODE;
-               lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+               spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+               spin_unlock_irq(shost->host_lock);
                return ndlp;
        }
-       if (phba->fc_flag & FC_RSCN_MODE) {
-               if (lpfc_rscn_payload_check(phba, did)) {
+
+       if ((vport->fc_flag & FC_RSCN_MODE) &&
+           !(vport->fc_flag & FC_NDISC_ACTIVE)) {
+               if (lpfc_rscn_payload_check(vport, did)) {
+                       /* If we've already received a PLOGI from this NPort
+                        * we don't need to try to discover it again.
+                        */
+                       if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+                               return NULL;
+
+                       spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+                       spin_unlock_irq(shost->host_lock);
 
                        /* Since this node is marked for discovery,
                         * delay timeout is not needed.
                         */
-                       if (ndlp->nlp_flag & NLP_DELAY_TMO)
-                               lpfc_cancel_retry_delay_tmo(phba, ndlp);
+                       lpfc_cancel_retry_delay_tmo(vport, ndlp);
                } else
                        ndlp = NULL;
        } else {
-               flg = ndlp->nlp_flag & NLP_LIST_MASK;
-               if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
+       /* If we've already received a PLOGI from this NPort,
+                * or we are already in the process of discovery on it,
+                * we don't need to try to discover it again.
+                */
+               if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
+                   ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+                   ndlp->nlp_flag & NLP_RCV_PLOGI)
                        return NULL;
-               ndlp->nlp_state = NLP_STE_NPR_NODE;
-               lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+               lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+               spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+               spin_unlock_irq(shost->host_lock);
        }
        return ndlp;
 }
 
 /* Build a list of nodes to discover based on the loopmap */
 void
-lpfc_disc_list_loopmap(struct lpfc_hba * phba)
+lpfc_disc_list_loopmap(struct lpfc_vport *vport)
 {
+       struct lpfc_hba  *phba = vport->phba;
        int j;
        uint32_t alpa, index;
 
-       if (phba->hba_state <= LPFC_LINK_DOWN) {
+       if (!lpfc_is_link_up(phba))
                return;
-       }
-       if (phba->fc_topology != TOPOLOGY_LOOP) {
+
+       if (phba->fc_topology != TOPOLOGY_LOOP)
                return;
-       }
 
        /* Check for loop map present or not */
        if (phba->alpa_map[0]) {
                for (j = 1; j <= phba->alpa_map[0]; j++) {
                        alpa = phba->alpa_map[j];
-
-                       if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
+                       if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
                                continue;
-                       }
-                       lpfc_setup_disc_node(phba, alpa);
+                       lpfc_setup_disc_node(vport, alpa);
                }
        } else {
                /* No alpamap, so try all alpa's */
@@ -1828,118 +2525,170 @@ lpfc_disc_list_loopmap(struct lpfc_hba * phba)
                        /* If cfg_scan_down is set, start from highest
                         * ALPA (0xef) to lowest (0x1).
                         */
-                       if (phba->cfg_scan_down)
+                       if (vport->cfg_scan_down)
                                index = j;
                        else
                                index = FC_MAXLOOP - j - 1;
                        alpa = lpfcAlpaArray[index];
-                       if ((phba->fc_myDID & 0xff) == alpa) {
+                       if ((vport->fc_myDID & 0xff) == alpa)
                                continue;
-                       }
-
-                       lpfc_setup_disc_node(phba, alpa);
+                       lpfc_setup_disc_node(vport, alpa);
                }
        }
        return;
 }
 
-/* Start Link up / RSCN discovery on NPR list */
 void
-lpfc_disc_start(struct lpfc_hba * phba)
+lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
 {
-       struct lpfc_sli *psli;
        LPFC_MBOXQ_t *mbox;
-       struct lpfc_nodelist *ndlp, *next_ndlp;
-       uint32_t did_changed, num_sent;
-       uint32_t clear_la_pending;
-       int rc;
-
-       psli = &phba->sli;
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
+       struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
+       struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
+       int  rc;
 
-       if (phba->hba_state <= LPFC_LINK_DOWN) {
+       /*
+        * if it's not a physical port or if we already send
+        * clear_la then don't send it.
+        */
+       if ((phba->link_state >= LPFC_CLEAR_LA) ||
+           (vport->port_type != LPFC_PHYSICAL_PORT))
                return;
+
+                       /* Link up discovery */
+       if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
+               phba->link_state = LPFC_CLEAR_LA;
+               lpfc_clear_la(phba, mbox);
+               mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+               mbox->vport = vport;
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+               if (rc == MBX_NOT_FINISHED) {
+                       mempool_free(mbox, phba->mbox_mem_pool);
+                       lpfc_disc_flush_list(vport);
+                       extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+                       fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+                       next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+                       phba->link_state = LPFC_HBA_ERROR;
+               }
+       }
+}
+
+/* Reg_vpi to tell firmware to resume normal operations */
+void
+lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+       LPFC_MBOXQ_t *regvpimbox;
+
+       regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (regvpimbox) {
+               lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
+               regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
+               regvpimbox->vport = vport;
+               if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
+                                       == MBX_NOT_FINISHED) {
+                       mempool_free(regvpimbox, phba->mbox_mem_pool);
+               }
        }
-       if (phba->hba_state == LPFC_CLEAR_LA)
+}
+
+/* Start Link up / RSCN discovery on NPR nodes */
+void
+lpfc_disc_start(struct lpfc_vport *vport)
+{
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_hba  *phba = vport->phba;
+       uint32_t num_sent;
+       uint32_t clear_la_pending;
+       int did_changed;
+
+       if (!lpfc_is_link_up(phba))
+               return;
+
+       if (phba->link_state == LPFC_CLEAR_LA)
                clear_la_pending = 1;
        else
                clear_la_pending = 0;
 
-       if (phba->hba_state < LPFC_HBA_READY) {
-               phba->hba_state = LPFC_DISC_AUTH;
-       }
-       lpfc_set_disctmo(phba);
+       if (vport->port_state < LPFC_VPORT_READY)
+               vport->port_state = LPFC_DISC_AUTH;
+
+       lpfc_set_disctmo(vport);
 
-       if (phba->fc_prevDID == phba->fc_myDID) {
+       if (vport->fc_prevDID == vport->fc_myDID)
                did_changed = 0;
-       } else {
+       else
                did_changed = 1;
-       }
-       phba->fc_prevDID = phba->fc_myDID;
-       phba->num_disc_nodes = 0;
+
+       vport->fc_prevDID = vport->fc_myDID;
+       vport->num_disc_nodes = 0;
 
        /* Start Discovery state <hba_state> */
-       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-                       "%d:0202 Start Discovery hba state x%x "
-                       "Data: x%x x%x x%x\n",
-                       phba->brd_no, phba->hba_state, phba->fc_flag,
-                       phba->fc_plogi_cnt, phba->fc_adisc_cnt);
-
-       /* If our did changed, we MUST do PLOGI */
-       list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-                               nlp_listp) {
-               if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
-                       if (did_changed) {
-                               spin_lock_irq(phba->host->host_lock);
-                               ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-                               spin_unlock_irq(phba->host->host_lock);
-                       }
-               }
-       }
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+                        "0202 Start Discovery hba state x%x "
+                        "Data: x%x x%x x%x\n",
+                        vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
+                        vport->fc_adisc_cnt);
 
        /* First do ADISCs - if any */
-       num_sent = lpfc_els_disc_adisc(phba);
+       num_sent = lpfc_els_disc_adisc(vport);
 
        if (num_sent)
                return;
 
-       if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
+       /*
+        * For SLI3, cmpl_reg_vpi will set port_state to READY, and
+        * continue discovery.
+        */
+       if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+           !(vport->fc_flag & FC_PT2PT) &&
+           !(vport->fc_flag & FC_RSCN_MODE)) {
+               lpfc_issue_reg_vpi(phba, vport);
+               return;
+       }
+
+       /*
+        * For SLI2, we need to set port_state to READY and continue
+        * discovery.
+        */
+       if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
                /* If we get here, there is nothing to ADISC */
-               if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
-                       phba->hba_state = LPFC_CLEAR_LA;
-                       lpfc_clear_la(phba, mbox);
-                       mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
-                       rc = lpfc_sli_issue_mbox(phba, mbox,
-                                                (MBX_NOWAIT | MBX_STOP_IOCB));
-                       if (rc == MBX_NOT_FINISHED) {
-                               mempool_free( mbox, phba->mbox_mem_pool);
-                               lpfc_disc_flush_list(phba);
-                               psli->ring[(psli->extra_ring)].flag &=
-                                       ~LPFC_STOP_IOCB_EVENT;
-                               psli->ring[(psli->fcp_ring)].flag &=
-                                       ~LPFC_STOP_IOCB_EVENT;
-                               psli->ring[(psli->next_ring)].flag &=
-                                       ~LPFC_STOP_IOCB_EVENT;
-                               phba->hba_state = LPFC_HBA_READY;
+               if (vport->port_type == LPFC_PHYSICAL_PORT)
+                       lpfc_issue_clear_la(phba, vport);
+
+               if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+                       vport->num_disc_nodes = 0;
+                       /* go thru NPR nodes and issue ELS PLOGIs */
+                       if (vport->fc_npr_cnt)
+                               lpfc_els_disc_plogi(vport);
+
+                       if (!vport->num_disc_nodes) {
+                               spin_lock_irq(shost->host_lock);
+                               vport->fc_flag &= ~FC_NDISC_ACTIVE;
+                               spin_unlock_irq(shost->host_lock);
+                               lpfc_can_disctmo(vport);
                        }
                }
+               vport->port_state = LPFC_VPORT_READY;
        } else {
                /* Next do PLOGIs - if any */
-               num_sent = lpfc_els_disc_plogi(phba);
+               num_sent = lpfc_els_disc_plogi(vport);
 
                if (num_sent)
                        return;
 
-               if (phba->fc_flag & FC_RSCN_MODE) {
+               if (vport->fc_flag & FC_RSCN_MODE) {
                        /* Check to see if more RSCNs came in while we
                         * were processing this one.
                         */
-                       if ((phba->fc_rscn_id_cnt == 0) &&
-                           (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
-                               spin_lock_irq(phba->host->host_lock);
-                               phba->fc_flag &= ~FC_RSCN_MODE;
-                               spin_unlock_irq(phba->host->host_lock);
+                       if ((vport->fc_rscn_id_cnt == 0) &&
+                           (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+                               spin_lock_irq(shost->host_lock);
+                               vport->fc_flag &= ~FC_RSCN_MODE;
+                               spin_unlock_irq(shost->host_lock);
+                               lpfc_can_disctmo(vport);
                        } else
-                               lpfc_els_handle_rscn(phba);
+                               lpfc_els_handle_rscn(vport);
                }
        }
        return;
@@ -1950,7 +2699,7 @@ lpfc_disc_start(struct lpfc_hba * phba)
  *  ring the match the sppecified nodelist.
  */
 static void
-lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        LIST_HEAD(completions);
        struct lpfc_sli *psli;
@@ -1964,7 +2713,7 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
        /* Error matching iocb on txq or txcmplq
         * First check the txq.
         */
-       spin_lock_irq(phba->host->host_lock);
+       spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
                if (iocb->context1 != ndlp) {
                        continue;
@@ -1984,49 +2733,53 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
                        continue;
                }
                icmd = &iocb->iocb;
-               if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
-                   (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+               if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
+                   icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
                        lpfc_sli_issue_abort_iotag(phba, pring, iocb);
                }
        }
-       spin_unlock_irq(phba->host->host_lock);
+       spin_unlock_irq(&phba->hbalock);
 
        while (!list_empty(&completions)) {
                iocb = list_get_first(&completions, struct lpfc_iocbq, list);
-               list_del(&iocb->list);
+               list_del_init(&iocb->list);
 
-               if (iocb->iocb_cmpl) {
+               if (!iocb->iocb_cmpl)
+                       lpfc_sli_release_iocbq(phba, iocb);
+               else {
                        icmd = &iocb->iocb;
                        icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        (iocb->iocb_cmpl) (phba, iocb, iocb);
-               } else
-                       lpfc_sli_release_iocbq(phba, iocb);
+               }
        }
-
-       return;
 }
 
-void
-lpfc_disc_flush_list(struct lpfc_hba * phba)
+static void
+lpfc_disc_flush_list(struct lpfc_vport *vport)
 {
        struct lpfc_nodelist *ndlp, *next_ndlp;
+       struct lpfc_hba *phba = vport->phba;
 
-       if (phba->fc_plogi_cnt) {
-               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
-                                       nlp_listp) {
-                       lpfc_free_tx(phba, ndlp);
-                       lpfc_nlp_remove(phba, ndlp);
-               }
-       }
-       if (phba->fc_adisc_cnt) {
-               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
-                                       nlp_listp) {
-                       lpfc_free_tx(phba, ndlp);
-                       lpfc_nlp_remove(phba, ndlp);
+       if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
+               list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+                                        nlp_listp) {
+                       if (!NLP_CHK_NODE_ACT(ndlp))
+                               continue;
+                       if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+                           ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
+                               lpfc_free_tx(phba, ndlp);
+                       }
                }
        }
-       return;
+}
+
+void
+lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
+{
+       lpfc_els_flush_rscn(vport);
+       lpfc_els_flush_cmd(vport);
+       lpfc_disc_flush_list(vport);
 }
 
 /*****************************************************************************/
@@ -2047,157 +2800,149 @@ lpfc_disc_flush_list(struct lpfc_hba * phba)
 void
 lpfc_disc_timeout(unsigned long ptr)
 {
-       struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+       struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+       struct lpfc_hba   *phba = vport->phba;
+       uint32_t tmo_posted;
        unsigned long flags = 0;
 
        if (unlikely(!phba))
                return;
 
-       spin_lock_irqsave(phba->host->host_lock, flags);
-       if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
-               phba->work_hba_events |= WORKER_DISC_TMO;
-               if (phba->work_wait)
-                       wake_up(phba->work_wait);
-       }
-       spin_unlock_irqrestore(phba->host->host_lock, flags);
+       spin_lock_irqsave(&vport->work_port_lock, flags);
+       tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+       if (!tmo_posted)
+               vport->work_port_events |= WORKER_DISC_TMO;
+       spin_unlock_irqrestore(&vport->work_port_lock, flags);
+
+       if (!tmo_posted)
+               lpfc_worker_wake_up(phba);
        return;
 }
 
 static void
-lpfc_disc_timeout_handler(struct lpfc_hba *phba)
+lpfc_disc_timeout_handler(struct lpfc_vport *vport)
 {
-       struct lpfc_sli *psli;
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_hba  *phba = vport->phba;
+       struct lpfc_sli  *psli = &phba->sli;
        struct lpfc_nodelist *ndlp, *next_ndlp;
-       LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
+       LPFC_MBOXQ_t *initlinkmbox;
        int rc, clrlaerr = 0;
 
-       if (unlikely(!phba))
-               return;
-
-       if (!(phba->fc_flag & FC_DISC_TMO))
+       if (!(vport->fc_flag & FC_DISC_TMO))
                return;
 
-       psli = &phba->sli;
+       spin_lock_irq(shost->host_lock);
+       vport->fc_flag &= ~FC_DISC_TMO;
+       spin_unlock_irq(shost->host_lock);
 
-       spin_lock_irq(phba->host->host_lock);
-       phba->fc_flag &= ~FC_DISC_TMO;
-       spin_unlock_irq(phba->host->host_lock);
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+               "disc timeout:    state:x%x rtry:x%x flg:x%x",
+               vport->port_state, vport->fc_ns_retry, vport->fc_flag);
 
-       switch (phba->hba_state) {
+       switch (vport->port_state) {
 
        case LPFC_LOCAL_CFG_LINK:
-       /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
-               /* FAN timeout */
-               lpfc_printf_log(phba,
-                                KERN_WARNING,
-                                LOG_DISCOVERY,
-                                "%d:0221 FAN timeout\n",
-                                phba->brd_no);
-
+       /* port_state is identically  LPFC_LOCAL_CFG_LINK while waiting for
+        * FAN
+        */
+                               /* FAN timeout */
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
+                                "0221 FAN timeout\n");
                /* Start discovery by sending FLOGI, clean up old rpis */
-               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-                                       nlp_listp) {
+               list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+                                        nlp_listp) {
+                       if (!NLP_CHK_NODE_ACT(ndlp))
+                               continue;
+                       if (ndlp->nlp_state != NLP_STE_NPR_NODE)
+                               continue;
                        if (ndlp->nlp_type & NLP_FABRIC) {
                                /* Clean up the ndlp on Fabric connections */
-                               lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+                               lpfc_drop_node(vport, ndlp);
+
                        } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                                /* Fail outstanding IO now since device
                                 * is marked for PLOGI.
                                 */
-                               lpfc_unreg_rpi(phba, ndlp);
+                               lpfc_unreg_rpi(vport, ndlp);
                        }
                }
-               phba->hba_state = LPFC_FLOGI;
-               lpfc_set_disctmo(phba);
-               lpfc_initial_flogi(phba);
+               if (vport->port_state != LPFC_FLOGI) {
+                       lpfc_initial_flogi(vport);
+                       return;
+               }
                break;
 
+       case LPFC_FDISC:
        case LPFC_FLOGI:
-       /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
+       /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
                /* Initial FLOGI timeout */
-               lpfc_printf_log(phba,
-                                KERN_ERR,
-                                LOG_DISCOVERY,
-                                "%d:0222 Initial FLOGI timeout\n",
-                                phba->brd_no);
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                                "0222 Initial %s timeout\n",
+                                vport->vpi ? "FDISC" : "FLOGI");
 
                /* Assume no Fabric and go on with discovery.
                 * Check for outstanding ELS FLOGI to abort.
                 */
 
                /* FLOGI failed, so just use loop map to make discovery list */
-               lpfc_disc_list_loopmap(phba);
+               lpfc_disc_list_loopmap(vport);
 
                /* Start discovery */
-               lpfc_disc_start(phba);
+               lpfc_disc_start(vport);
                break;
 
        case LPFC_FABRIC_CFG_LINK:
        /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
           NameServer login */
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                               "%d:0223 Timeout while waiting for NameServer "
-                               "login\n", phba->brd_no);
-
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                                "0223 Timeout while waiting for "
+                                "NameServer login\n");
                /* Next look for NameServer ndlp */
-               ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
-               if (ndlp)
-                       lpfc_nlp_remove(phba, ndlp);
-               /* Start discovery */
-               lpfc_disc_start(phba);
-               break;
+               ndlp = lpfc_findnode_did(vport, NameServer_DID);
+               if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+                       lpfc_els_abort(phba, ndlp);
+
+               /* ReStart discovery */
+               goto restart_disc;
 
        case LPFC_NS_QRY:
        /* Check for wait for NameServer Rsp timeout */
-               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                               "%d:0224 NameServer Query timeout "
-                               "Data: x%x x%x\n",
-                               phba->brd_no,
-                               phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
-
-               ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
-                                                               NameServer_DID);
-               if (ndlp) {
-                       if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
-                               /* Try it one more time */
-                               rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
-                               if (rc == 0)
-                                       break;
-                       }
-                       phba->fc_ns_retry = 0;
-               }
-
-               /* Nothing to authenticate, so CLEAR_LA right now */
-               clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-               if (!clearlambox) {
-                       clrlaerr = 1;
-                       lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                                       "%d:0226 Device Discovery "
-                                       "completion error\n",
-                                       phba->brd_no);
-                       phba->hba_state = LPFC_HBA_ERROR;
-                       break;
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                                "0224 NameServer Query timeout "
+                                "Data: x%x x%x\n",
+                                vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+               if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+                       /* Try it one more time */
+                       vport->fc_ns_retry++;
+                       rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+                                        vport->fc_ns_retry, 0);
+                       if (rc == 0)
+                               break;
                }
+               vport->fc_ns_retry = 0;
 
-               phba->hba_state = LPFC_CLEAR_LA;
-               lpfc_clear_la(phba, clearlambox);
-               clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
-               rc = lpfc_sli_issue_mbox(phba, clearlambox,
-                                        (MBX_NOWAIT | MBX_STOP_IOCB));
-               if (rc == MBX_NOT_FINISHED) {
-                       mempool_free(clearlambox, phba->mbox_mem_pool);
-                       clrlaerr = 1;
-                       break;
+restart_disc:
+               /*
+                * Discovery is over.
+                * set port_state to PORT_READY if SLI2.
+                * cmpl_reg_vpi will set port_state to READY for SLI3.
+                */
+               if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+                       lpfc_issue_reg_vpi(phba, vport);
+               else  { /* NPIV Not enabled */
+                       lpfc_issue_clear_la(phba, vport);
+                       vport->port_state = LPFC_VPORT_READY;
                }
 
                /* Setup and issue mailbox INITIALIZE LINK command */
                initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!initlinkmbox) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                                       "%d:0206 Device Discovery "
-                                       "completion error\n",
-                                       phba->brd_no);
-                       phba->hba_state = LPFC_HBA_ERROR;
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                                        "0206 Device Discovery "
+                                        "completion error\n");
+                       phba->link_state = LPFC_HBA_ERROR;
                        break;
                }
 
@@ -2205,8 +2950,10 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
                lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
                               phba->cfg_link_speed);
                initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
-               rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
-                                        (MBX_NOWAIT | MBX_STOP_IOCB));
+               initlinkmbox->vport = vport;
+               initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+               rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
+               lpfc_set_loopback_flag(phba);
                if (rc == MBX_NOT_FINISHED)
                        mempool_free(initlinkmbox, phba->mbox_mem_pool);
 
@@ -2214,67 +2961,77 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
 
        case LPFC_DISC_AUTH:
        /* Node Authentication timeout */
-               lpfc_printf_log(phba,
-                                KERN_ERR,
-                                LOG_DISCOVERY,
-                                "%d:0227 Node Authentication timeout\n",
-                                phba->brd_no);
-               lpfc_disc_flush_list(phba);
-               clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-               if (!clearlambox) {
-                       clrlaerr = 1;
-                       lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                                       "%d:0207 Device Discovery "
-                                       "completion error\n",
-                                       phba->brd_no);
-                       phba->hba_state = LPFC_HBA_ERROR;
-                       break;
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                                "0227 Node Authentication timeout\n");
+               lpfc_disc_flush_list(vport);
+
+               /*
+                * set port_state to PORT_READY if SLI2.
+                * cmpl_reg_vpi will set port_state to READY for SLI3.
+                */
+               if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+                       lpfc_issue_reg_vpi(phba, vport);
+               else {  /* NPIV Not enabled */
+                       lpfc_issue_clear_la(phba, vport);
+                       vport->port_state = LPFC_VPORT_READY;
                }
-               phba->hba_state = LPFC_CLEAR_LA;
-               lpfc_clear_la(phba, clearlambox);
-               clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
-               rc = lpfc_sli_issue_mbox(phba, clearlambox,
-                                        (MBX_NOWAIT | MBX_STOP_IOCB));
-               if (rc == MBX_NOT_FINISHED) {
-                       mempool_free(clearlambox, phba->mbox_mem_pool);
-                       clrlaerr = 1;
+               break;
+
+       case LPFC_VPORT_READY:
+               if (vport->fc_flag & FC_RSCN_MODE) {
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                                        "0231 RSCN timeout Data: x%x "
+                                        "x%x\n",
+                                        vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+                       /* Cleanup any outstanding ELS commands */
+                       lpfc_els_flush_cmd(vport);
+
+                       lpfc_els_flush_rscn(vport);
+                       lpfc_disc_flush_list(vport);
                }
                break;
 
+       default:
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                                "0273 Unexpected discovery timeout, "
+                                "vport State x%x\n", vport->port_state);
+               break;
+       }
+
+       switch (phba->link_state) {
        case LPFC_CLEAR_LA:
-       /* CLEAR LA timeout */
-               lpfc_printf_log(phba,
-                                KERN_ERR,
-                                LOG_DISCOVERY,
-                                "%d:0228 CLEAR LA timeout\n",
-                                phba->brd_no);
+                               /* CLEAR LA timeout */
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                                "0228 CLEAR LA timeout\n");
                clrlaerr = 1;
                break;
 
-       case LPFC_HBA_READY:
-               if (phba->fc_flag & FC_RSCN_MODE) {
-                       lpfc_printf_log(phba,
-                                       KERN_ERR,
-                                       LOG_DISCOVERY,
-                                       "%d:0231 RSCN timeout Data: x%x x%x\n",
-                                       phba->brd_no,
-                                       phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
-
-                       /* Cleanup any outstanding ELS commands */
-                       lpfc_els_flush_cmd(phba);
+       case LPFC_LINK_UP:
+               lpfc_issue_clear_la(phba, vport);
+               /* Drop thru */
+       case LPFC_LINK_UNKNOWN:
+       case LPFC_WARM_START:
+       case LPFC_INIT_START:
+       case LPFC_INIT_MBX_CMDS:
+       case LPFC_LINK_DOWN:
+       case LPFC_HBA_ERROR:
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+                                "0230 Unexpected timeout, hba link "
+                                "state x%x\n", phba->link_state);
+               clrlaerr = 1;
+               break;
 
-                       lpfc_els_flush_rscn(phba);
-                       lpfc_disc_flush_list(phba);
-               }
+       case LPFC_HBA_READY:
                break;
        }
 
        if (clrlaerr) {
-               lpfc_disc_flush_list(phba);
+               lpfc_disc_flush_list(vport);
                psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
-               phba->hba_state = LPFC_HBA_READY;
+               vport->port_state = LPFC_VPORT_READY;
        }
 
        return;
@@ -2287,136 +3044,266 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
  * handed off to the SLI layer.
  */
 void
-lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       struct lpfc_sli *psli;
-       MAILBOX_t *mb;
-       struct lpfc_dmabuf *mp;
-       struct lpfc_nodelist *ndlp;
-
-       psli = &phba->sli;
-       mb = &pmb->mb;
-
-       ndlp = (struct lpfc_nodelist *) pmb->context2;
-       mp = (struct lpfc_dmabuf *) (pmb->context1);
+       MAILBOX_t *mb = &pmb->mb;
+       struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
+       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+       struct lpfc_vport    *vport = pmb->vport;
 
        pmb->context1 = NULL;
 
        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_type |= NLP_FABRIC;
-       ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-       lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+       lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
-       /* Start issuing Fabric-Device Management Interface (FDMI)
-        * command to 0xfffffa (FDMI well known port)
+       /*
+        * Start issuing Fabric-Device Management Interface (FDMI) command to
+        * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
+        * fdmi-on=2 (supporting RPA/hostname)
         */
-       if (phba->cfg_fdmi_on == 1) {
-               lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
-       } else {
-               /*
-                * Delay issuing FDMI command if fdmi-on=2
-                * (supporting RPA/hostnmae)
-                */
-               mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
-       }
 
+       if (vport->cfg_fdmi_on == 1)
+               lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
+       else
+               mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+
+       /* decrement the node reference count held for this callback
+        * function.
+        */
+       lpfc_nlp_put(ndlp);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
-       mempool_free( pmb, phba->mbox_mem_pool);
+       mempool_free(pmb, phba->mbox_mem_pool);
 
        return;
 }
 
-/*
- * This routine looks up the ndlp  lists
- * for the given RPI. If rpi found
- * it return the node list pointer
- * else return NULL.
- */
-struct lpfc_nodelist *
-__lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
+static int
+lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
+{
+       uint16_t *rpi = param;
+
+       return ndlp->nlp_rpi == *rpi;
+}
+
+static int
+lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
+{
+       return memcmp(&ndlp->nlp_portname, param,
+                     sizeof(ndlp->nlp_portname)) == 0;
+}
+
+static struct lpfc_nodelist *
+__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
 {
        struct lpfc_nodelist *ndlp;
-       struct list_head * lists[]={&phba->fc_nlpunmap_list,
-                                   &phba->fc_nlpmap_list,
-                                   &phba->fc_plogi_list,
-                                   &phba->fc_adisc_list,
-                                   &phba->fc_reglogin_list};
-       int i;
 
-       for (i = 0; i < ARRAY_SIZE(lists); i++ )
-               list_for_each_entry(ndlp, lists[i], nlp_listp)
-                       if (ndlp->nlp_rpi == rpi) {
-                               return ndlp;
-                       }
+       list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+               if (filter(ndlp, param))
+                       return ndlp;
+       }
        return NULL;
 }
 
+/*
+ * This routine looks up the ndlp lists for the given RPI. If rpi found it
+ * returns the node list element pointer else return NULL.
+ */
 struct lpfc_nodelist *
-lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
+__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
 {
-       struct lpfc_nodelist *ndlp;
-
-       spin_lock_irq(phba->host->host_lock);
-       ndlp = __lpfc_findnode_rpi(phba, rpi);
-       spin_unlock_irq(phba->host->host_lock);
-       return ndlp;
+       return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
 }
 
 /*
- * This routine looks up the ndlp  lists
- * for the given WWPN. If WWPN found
- * it return the node list pointer
- * else return NULL.
+ * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
+ * returns the node list element pointer else return NULL.
  */
 struct lpfc_nodelist *
-lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
-                  struct lpfc_name * wwpn)
+lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
 {
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;
-       struct list_head * lists[]={&phba->fc_nlpunmap_list,
-                                   &phba->fc_nlpmap_list,
-                                   &phba->fc_npr_list,
-                                   &phba->fc_plogi_list,
-                                   &phba->fc_adisc_list,
-                                   &phba->fc_reglogin_list,
-                                   &phba->fc_prli_list};
-       uint32_t search[]={NLP_SEARCH_UNMAPPED,
-                          NLP_SEARCH_MAPPED,
-                          NLP_SEARCH_NPR,
-                          NLP_SEARCH_PLOGI,
-                          NLP_SEARCH_ADISC,
-                          NLP_SEARCH_REGLOGIN,
-                          NLP_SEARCH_PRLI};
-       int i;
 
-       spin_lock_irq(phba->host->host_lock);
-       for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
-               if (!(order & search[i]))
-                       continue;
-               list_for_each_entry(ndlp, lists[i], nlp_listp) {
-                       if (memcmp(&ndlp->nlp_portname, wwpn,
-                                  sizeof(struct lpfc_name)) == 0) {
-                               spin_unlock_irq(phba->host->host_lock);
-                               return ndlp;
-                       }
-               }
-       }
-       spin_unlock_irq(phba->host->host_lock);
-       return NULL;
+       spin_lock_irq(shost->host_lock);
+       ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
+       spin_unlock_irq(shost->host_lock);
+       return ndlp;
 }
 
 void
-lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-                uint32_t did)
+lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+             uint32_t did)
 {
        memset(ndlp, 0, sizeof (struct lpfc_nodelist));
-       INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
-       init_timer(&ndlp->nlp_delayfunc);
-       ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
-       ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
-       ndlp->nlp_DID = did;
-       ndlp->nlp_phba = phba;
-       ndlp->nlp_sid = NLP_NO_SID;
+
+       lpfc_initialize_node(vport, ndlp, did);
+       INIT_LIST_HEAD(&ndlp->nlp_listp);
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+               "node init:       did:x%x",
+               ndlp->nlp_DID, 0, 0);
+
        return;
 }
+
+/* This routine releases all resources associated with a specific NPort's ndlp
+ * and mempool_free's the nodelist.
+ */
+static void
+lpfc_nlp_release(struct kref *kref)
+{
+       struct lpfc_hba *phba;
+       unsigned long flags;
+       struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
+                                                 kref);
+
+       lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+               "node release:    did:x%x flg:x%x type:x%x",
+               ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+       lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+                       "0279 lpfc_nlp_release: ndlp:x%p "
+                       "usgmap:x%x refcnt:%d\n",
+                       (void *)ndlp, ndlp->nlp_usg_map,
+                       atomic_read(&ndlp->kref.refcount));
+
+       /* remove ndlp from action. */
+       lpfc_nlp_remove(ndlp->vport, ndlp);
+
+       /* clear the ndlp active flag for all release cases */
+       phba = ndlp->vport->phba;
+       spin_lock_irqsave(&phba->ndlp_lock, flags);
+       NLP_CLR_NODE_ACT(ndlp);
+       spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+
+       /* free ndlp memory for final ndlp release */
+       if (NLP_CHK_FREE_REQ(ndlp)) {
+               kfree(ndlp->lat_data);
+               mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
+       }
+}
+
+/* This routine bumps the reference count for a ndlp structure to ensure
+ * that one discovery thread won't free a ndlp while another discovery thread
+ * is using it.
+ */
+struct lpfc_nodelist *
+lpfc_nlp_get(struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba *phba;
+       unsigned long flags;
+
+       if (ndlp) {
+               lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+                       "node get:        did:x%x flg:x%x refcnt:x%x",
+                       ndlp->nlp_DID, ndlp->nlp_flag,
+                       atomic_read(&ndlp->kref.refcount));
+               /* The check of ndlp usage to prevent incrementing the
+                * ndlp reference count that is in the process of being
+                * released.
+                */
+               phba = ndlp->vport->phba;
+               spin_lock_irqsave(&phba->ndlp_lock, flags);
+               if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
+                       spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+                       lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+                               "0276 lpfc_nlp_get: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+                       return NULL;
+               } else
+                       kref_get(&ndlp->kref);
+               spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+       }
+       return ndlp;
+}
+
+/* This routine decrements the reference count for a ndlp structure. If the
+ * count goes to 0, this indicates that the associated nodelist should be
+ * freed. Returning 1 indicates the ndlp resource has been released; on the
+ * other hand, returning 0 indicates the ndlp resource has not been released
+ * yet.
+ */
+int
+lpfc_nlp_put(struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba *phba;
+       unsigned long flags;
+
+       if (!ndlp)
+               return 1;
+
+       lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+       "node put:        did:x%x flg:x%x refcnt:x%x",
+               ndlp->nlp_DID, ndlp->nlp_flag,
+               atomic_read(&ndlp->kref.refcount));
+       phba = ndlp->vport->phba;
+       spin_lock_irqsave(&phba->ndlp_lock, flags);
+       /* Check the ndlp memory free acknowledge flag to avoid the
+        * possible race condition that kref_put got invoked again
+        * after previous one has done ndlp memory free.
+        */
+       if (NLP_CHK_FREE_ACK(ndlp)) {
+               spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+               lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+                               "0274 lpfc_nlp_put: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               return 1;
+       }
+       /* Check the ndlp inactivate log flag to avoid the possible
+        * race condition that kref_put got invoked again after ndlp
+        * is already in inactivating state.
+        */
+       if (NLP_CHK_IACT_REQ(ndlp)) {
+               spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+               lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+                               "0275 lpfc_nlp_put: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               return 1;
+       }
+       /* For last put, mark the ndlp usage flags to make sure no
+        * other kref_get and kref_put on the same ndlp shall get
+        * in between the process when the final kref_put has been
+        * invoked on this ndlp.
+        */
+       if (atomic_read(&ndlp->kref.refcount) == 1) {
+               /* Indicate ndlp is put to inactive state. */
+               NLP_SET_IACT_REQ(ndlp);
+               /* Acknowledge ndlp memory free has been seen. */
+               if (NLP_CHK_FREE_REQ(ndlp))
+                       NLP_SET_FREE_ACK(ndlp);
+       }
+       spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+       /* Note, the kref_put returns 1 when decrementing a reference
+        * count that was 1, it invokes the release callback function,
+        * but it still leaves the reference count at 1 (it does not
+        * actually perform the last decrement). Otherwise, it actually
+        * decrements the reference count and returns 0.
+        */
+       return kref_put(&ndlp->kref, lpfc_nlp_release);
+}
+
+/* This routine frees the specified nodelist if it is not in use
+ * by any other discovery thread. This routine returns 1 if the
+ * ndlp has been freed. A return value of 0 indicates the ndlp
+ * has not yet been released.
+ */
+int
+lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
+{
+       lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+               "node not used:   did:x%x flg:x%x refcnt:x%x",
+               ndlp->nlp_DID, ndlp->nlp_flag,
+               atomic_read(&ndlp->kref.refcount));
+       if (atomic_read(&ndlp->kref.refcount) == 1)
+               if (lpfc_nlp_put(ndlp))
+                       return 1;
+       return 0;
+}