diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f2b8bc4..311ed6d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -30,6 +30,7 @@
 #include <scsi/scsi_transport_fc.h>
 
 #include "lpfc_hw.h"
+#include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_sli.h"
 #include "lpfc_scsi.h"
@@ -69,7 +70,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
        rdata = rport->dd_data;
        ndlp = rdata->pnode;
 
-       if (!ndlp) {
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
                        printk(KERN_ERR "Cannot find remote node"
                        " to terminate I/O Data x%x\n",
@@ -88,14 +89,6 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
                        &phba->sli.ring[phba->sli.fcp_ring],
                        ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
-
-       /*
-        * A device is normally blocked for rediscovery and unblocked when
-        * devloss timeout happens.  In case a vport is removed or driver
-        * unloaded before devloss timeout happens, we need to unblock here.
-        */
-       scsi_target_unblock(&rport->dev);
-       return;
 }
 
 /*
@@ -114,15 +107,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 
        rdata = rport->dd_data;
        ndlp = rdata->pnode;
-
-       if (!ndlp) {
-               if (rport->scsi_target_id != -1) {
-                       printk(KERN_ERR "Cannot find remote node"
-                               " for rport in dev_loss_tmo_callbk x%x\n",
-                               rport->port_id);
-               }
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
                return;
-       }
 
        vport = ndlp->vport;
        phba  = vport->phba;
@@ -156,12 +142,15 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
                return;
 
        spin_lock_irq(&phba->hbalock);
-       evtp->evt_arg1  = ndlp;
-       evtp->evt       = LPFC_EVT_DEV_LOSS;
-       list_add_tail(&evtp->evt_listp, &phba->work_list);
-       if (phba->work_wait)
-               wake_up(phba->work_wait);
-
+       /* We need to hold the node by incrementing the reference
+        * count until this queued work is done
+        */
+       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+       if (evtp->evt_arg1) {
+               evtp->evt = LPFC_EVT_DEV_LOSS;
+               list_add_tail(&evtp->evt_listp, &phba->work_list);
+               lpfc_worker_wake_up(phba);
+       }
        spin_unlock_irq(&phba->hbalock);
 
        return;
@@ -202,6 +191,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
         * appropriately we just need to cleanup the ndlp rport info here.
         */
        if (vport->load_flag & FC_UNLOADING) {
+               if (ndlp->nlp_sid != NLP_NO_SID) {
+                       /* flush the target */
+                       lpfc_sli_abort_iocb(vport,
+                                       &phba->sli.ring[phba->sli.fcp_ring],
+                                       ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+               }
                put_node = rdata->pnode != NULL;
                put_rport = ndlp->rport != NULL;
                rdata->pnode = NULL;
@@ -213,8 +208,16 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                return;
        }
 
-       if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+       if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+                                "0284 Devloss timeout Ignored on "
+                                "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+                                "NPort x%06x\n",
+                                *name, *(name+1), *(name+2), *(name+3),
+                                *(name+4), *(name+5), *(name+6), *(name+7),
+                                ndlp->nlp_DID);
                return;
+       }
 
        if (ndlp->nlp_type & NLP_FABRIC) {
                /* We will clean up these Nodes in linkup */
@@ -235,14 +238,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
-       if (vport->load_flag & FC_UNLOADING)
-               warn_on = 0;
 
        if (warn_on) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                 "0203 Devloss timeout on "
-                                "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
-                                "NPort x%x Data: x%x x%x x%x\n",
+                                "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+                                "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
@@ -250,8 +251,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
        } else {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0204 Devloss timeout on "
-                                "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
-                                "NPort x%x Data: x%x x%x x%x\n",
+                                "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+                                "NPort x%06x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
@@ -270,16 +271,128 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
        if (!(vport->load_flag & FC_UNLOADING) &&
            !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
            !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
-           (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) {
+           (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
-       }
 }
 
-
+/**
+ * lpfc_alloc_fast_evt: Allocates data structure for posting event.
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from functions that need to post
+ * events from interrupt context. It allocates the data
+ * structure required for posting an event and keeps track of
+ * the number of pending events to prevent an event storm when
+ * there are too many.
+ **/
+struct lpfc_fast_path_event *
+lpfc_alloc_fast_evt(struct lpfc_hba *phba)
+{
+       struct lpfc_fast_path_event *ret;
+
+       /* If there are a lot of fast events, do not exhaust memory */
+       if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
+               return NULL;
+
+       ret = kzalloc(sizeof(struct lpfc_fast_path_event),
+                       GFP_ATOMIC);
+       if (!ret)
+               return NULL;
+
+       atomic_inc(&phba->fast_event_count);
+       INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+       ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+       return ret;
+}
+
+/**
+ * lpfc_free_fast_evt: Frees event data structure.
+ * @phba: Pointer to hba context object.
+ * @evt:  Event object which needs to be freed.
+ *
+ * This function frees the data structure required for posting
+ * events.
+ **/
 void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
-       wake_up(phba->work_wait);
+lpfc_free_fast_evt(struct lpfc_hba *phba,
+               struct lpfc_fast_path_event *evt)
+{
+       atomic_dec(&phba->fast_event_count);
+       kfree(evt);
+}
+
+/**
+ * lpfc_send_fastpath_evt: Posts events generated from fast path.
+ * @phba: Pointer to hba context object.
+ * @evtp: Event data structure.
+ *
+ * This function is called from the worker thread when the interrupt
+ * context needs to post an event. It posts the event to the
+ * fc transport netlink interface.
+ **/
+static void
+lpfc_send_fastpath_evt(struct lpfc_hba *phba,
+               struct lpfc_work_evt *evtp)
+{
+       unsigned long evt_category, evt_sub_category;
+       struct lpfc_fast_path_event *fast_evt_data;
+       char *evt_data;
+       uint32_t evt_data_size;
+       struct Scsi_Host *shost;
+
+       fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
+               work_evt);
+
+       evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
+       evt_sub_category = (unsigned long) fast_evt_data->un.
+                       fabric_evt.subcategory;
+       shost = lpfc_shost_from_vport(fast_evt_data->vport);
+       if (evt_category == FC_REG_FABRIC_EVENT) {
+               if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
+                       evt_data = (char *) &fast_evt_data->un.read_check_error;
+                       evt_data_size = sizeof(fast_evt_data->un.
+                               read_check_error);
+               } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
+                       (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
+                       evt_data = (char *) &fast_evt_data->un.fabric_evt;
+                       evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
+               } else {
+                       lpfc_free_fast_evt(phba, fast_evt_data);
+                       return;
+               }
+       } else if (evt_category == FC_REG_SCSI_EVENT) {
+               switch (evt_sub_category) {
+               case LPFC_EVENT_QFULL:
+               case LPFC_EVENT_DEVBSY:
+                       evt_data = (char *) &fast_evt_data->un.scsi_evt;
+                       evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
+                       break;
+               case LPFC_EVENT_CHECK_COND:
+                       evt_data = (char *) &fast_evt_data->un.check_cond_evt;
+                       evt_data_size =  sizeof(fast_evt_data->un.
+                               check_cond_evt);
+                       break;
+               case LPFC_EVENT_VARQUEDEPTH:
+                       evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
+                       evt_data_size = sizeof(fast_evt_data->un.
+                               queue_depth_evt);
+                       break;
+               default:
+                       lpfc_free_fast_evt(phba, fast_evt_data);
+                       return;
+               }
+       } else {
+               lpfc_free_fast_evt(phba, fast_evt_data);
+               return;
+       }
+
+       fc_host_post_vendor_event(shost,
+               fc_get_event_number(),
+               evt_data_size,
+               evt_data,
+               LPFC_NL_VENDOR_ID);
+
+       lpfc_free_fast_evt(phba, fast_evt_data);
        return;
 }
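
The three routines above form a single allocate/queue/post/free path. A
minimal sketch of how an interrupt-context producer would presumably use it,
pieced together from this hunk; the particular event category chosen here is
illustrative only, and phba/vport are assumed to come from the caller:

	struct lpfc_fast_path_event *evt;
	unsigned long flags;

	/* interrupt context: allocation is throttled by fast_event_count */
	evt = lpfc_alloc_fast_evt(phba);
	if (evt) {
		/* fill in the category/subcategory and payload */
		evt->un.fabric_evt.event_type = FC_REG_FABRIC_EVENT;
		evt->un.fabric_evt.subcategory = LPFC_EVENT_PORT_BUSY;
		evt->vport = vport;
		/* the embedded work_evt was initialized by
		 * lpfc_alloc_fast_evt(); queue it and wake the worker.
		 * lpfc_work_list_done() then calls lpfc_send_fastpath_evt(),
		 * which posts the payload via fc_host_post_vendor_event()
		 * and frees the object. */
		spin_lock_irqsave(&phba->hbalock, flags);
		list_add_tail(&evt->work_evt.evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
		spin_unlock_irqrestore(&phba->hbalock, flags);
	}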
 
@@ -301,12 +411,18 @@ lpfc_work_list_done(struct lpfc_hba *phba)
                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
                        lpfc_els_retry_delay_handler(ndlp);
                        free_evt = 0; /* evt is part of ndlp */
+                       /* decrement the node reference count held
+                        * for this queued work
+                        */
+                       lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_DEV_LOSS:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-                       lpfc_nlp_get(ndlp);
                        lpfc_dev_loss_tmo_handler(ndlp);
                        free_evt = 0;
+                       /* decrement the node reference count held for
+                        * this queued work
+                        */
                        lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_ONLINE:
@@ -348,6 +464,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
+               case LPFC_EVT_FASTPATH_MGMT_EVT:
+                       lpfc_send_fastpath_evt(phba, evtp);
+                       free_evt = 0;
+                       break;
                }
                if (free_evt)
                        kfree(evtp);
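
The LPFC_EVT_ELS_RETRY and LPFC_EVT_DEV_LOSS cases now follow one rule: the
node reference is taken by the producer when the event is queued (see the
lpfc_nlp_get() added in lpfc_dev_loss_tmo_callbk above) and dropped here
after the handler runs. Previously the consumer took its own lpfc_nlp_get(),
leaving a window in which the node could go away while the event sat on the
work list. Reduced to its core, the pairing is:

	/* producer, under phba->hbalock: pin the node while it waits */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);	/* NULL if node is dying */
	if (evtp->evt_arg1)
		list_add_tail(&evtp->evt_listp, &phba->work_list);

	/* consumer (worker thread): handle the event, then unpin */
	ndlp = (struct lpfc_nodelist *) evtp->evt_arg1;
	lpfc_dev_loss_tmo_handler(ndlp);
	lpfc_nlp_put(ndlp);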
@@ -372,6 +492,7 @@ lpfc_work_done(struct lpfc_hba *phba)
        spin_unlock_irq(&phba->hbalock);
 
        if (ha_copy & HA_ERATT)
+               /* Handle the error attention event */
                lpfc_handle_eratt(phba);
 
        if (ha_copy & HA_MBATT)
@@ -379,9 +500,10 @@ lpfc_work_done(struct lpfc_hba *phba)
 
        if (ha_copy & HA_LATT)
                lpfc_handle_latt(phba);
+
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i < LPFC_MAX_VPORTS; i++) {
+               for(i = 0; i <= phba->max_vpi; i++) {
                        /*
                         * We could have no vports in array if unloading, so if
                         * this happens then just use the pport
@@ -392,7 +514,10 @@ lpfc_work_done(struct lpfc_hba *phba)
                                vport = vports[i];
                        if (vport == NULL)
                                break;
+                       spin_lock_irq(&vport->work_port_lock);
                        work_port_events = vport->work_port_events;
+                       vport->work_port_events &= ~work_port_events;
+                       spin_unlock_irq(&vport->work_port_lock);
                        if (work_port_events & WORKER_DISC_TMO)
                                lpfc_disc_timeout_handler(vport);
                        if (work_port_events & WORKER_ELS_TMO)
@@ -409,11 +534,8 @@ lpfc_work_done(struct lpfc_hba *phba)
                                lpfc_ramp_down_queue_handler(phba);
                        if (work_port_events & WORKER_RAMP_UP_QUEUE)
                                lpfc_ramp_up_queue_handler(phba);
-                       spin_lock_irq(&vport->work_port_lock);
-                       vport->work_port_events &= ~work_port_events;
-                       spin_unlock_irq(&vport->work_port_lock);
                }
-       lpfc_destroy_vport_work_array(vports);
+       lpfc_destroy_vport_work_array(phba, vports);
 
        pring = &phba->sli.ring[LPFC_ELS_RING];
        status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
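
A pattern repeated throughout this patch: the vport work array is now
iterated up to the HBA's negotiated phba->max_vpi instead of the
compile-time LPFC_MAX_VPORTS, and lpfc_destroy_vport_work_array() takes the
phba as an extra argument. The resulting idiom, as it appears in the hunks
below:

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
			lpfc_linkdown_port(vports[i]);	/* per-vport work */
	lpfc_destroy_vport_work_array(phba, vports);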
@@ -422,11 +544,13 @@ lpfc_work_done(struct lpfc_hba *phba)
                || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
+                       /* Set the lpfc data pending flag */
+                       set_bit(LPFC_DATA_READY, &phba->data_flags);
                } else {
+                       pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                        lpfc_sli_handle_slow_ring_event(phba, pring,
                                                        (status &
                                                         HA_RXMASK));
-                       pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                }
                /*
                 * Turn on Ring interrupts
@@ -452,67 +576,34 @@ lpfc_work_done(struct lpfc_hba *phba)
        lpfc_work_list_done(phba);
 }
 
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
-       struct lpfc_vport *vport;
-       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-       int rc = 0;
-
-       spin_lock_irq(&phba->hbalock);
-       list_for_each_entry(vport, &phba->port_list, listentry) {
-               if (vport->work_port_events) {
-                       rc = 1;
-                       break;
-               }
-       }
-       if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
-           kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
-               rc = 1;
-               phba->work_found++;
-       } else
-               phba->work_found = 0;
-       spin_unlock_irq(&phba->hbalock);
-       return rc;
-}
-
-
 int
 lpfc_do_work(void *p)
 {
        struct lpfc_hba *phba = p;
        int rc;
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
 
        set_user_nice(current, -20);
-       phba->work_wait = &work_waitq;
-       phba->work_found = 0;
-
-       while (1) {
-
-               rc = wait_event_interruptible(work_waitq,
-                                             check_work_wait_done(phba));
-
-               BUG_ON(rc);
-
-               if (kthread_should_stop())
+       phba->data_flags = 0;
+
+       while (!kthread_should_stop()) {
+               /* wait for and check worker queue activity */
+               rc = wait_event_interruptible(phba->work_waitq,
+                                       (test_and_clear_bit(LPFC_DATA_READY,
+                                                           &phba->data_flags)
+                                        || kthread_should_stop()));
+               /* Signal wakeup shall terminate the worker thread */
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+                                       "0433 Wakeup on signal: rc=x%x\n", rc);
                        break;
+               }
 
+               /* Attend to pending lpfc data processing */
                lpfc_work_done(phba);
-
-               /* If there is alot of slow ring work, like during link up
-                * check_work_wait_done() may cause this thread to not give
-                * up the CPU for very long periods of time. This may cause
-                * soft lockups or other problems. To avoid these situations
-                * give up the CPU here after LPFC_MAX_WORKER_ITERATION
-                * consecutive iterations.
-                */
-               if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
-                       phba->work_found = 0;
-                       schedule();
-               }
        }
-       phba->work_wait = NULL;
+       phba->worker_thread = NULL;
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "0432 Worker thread stopped.\n");
        return 0;
 }
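
The polling helper check_work_wait_done() is gone: the worker now sleeps on
phba->work_waitq and is driven purely by the LPFC_DATA_READY bit in
phba->data_flags. The wake-up side is not visible in this file's hunks; a
sketch of its assumed shape, consistent with the set_bit() call added in
lpfc_work_done() above:

	void
	lpfc_worker_wake_up(struct lpfc_hba *phba)
	{
		/* Set the lpfc data pending flag */
		set_bit(LPFC_DATA_READY, &phba->data_flags);

		/* Wake up the worker thread */
		wake_up(&phba->work_waitq);
	}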
 
@@ -542,24 +633,26 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 
        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
-       if (phba->work_wait)
-               lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, flags);
 
+       lpfc_worker_wake_up(phba);
+
        return 1;
 }
 
 void
 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 {
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        struct lpfc_nodelist *ndlp, *next_ndlp;
        int  rc;
 
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+               if (!NLP_CHK_NODE_ACT(ndlp))
+                       continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
-
                if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
                        ((vport->port_type == LPFC_NPIV_PORT) &&
                        (ndlp->nlp_DID == NameServer_DID)))
@@ -575,7 +668,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
        }
        if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
                lpfc_mbx_unreg_vpi(vport);
+               spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+               spin_unlock_irq(shost->host_lock);
        }
 }
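
Note the ordering change in lpfc_workq_post_event(): the worker is woken
after hbalock is released, and the old "if (phba->work_wait)" guard is gone.
With the wait queue embedded in the HBA (phba->work_waitq) it always exists,
and waking outside the lock keeps the woken thread from immediately
contending for hbalock:

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);	/* called with hbalock released */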
 
@@ -618,9 +713,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
        LPFC_MBOXQ_t          *mb;
        int i;
 
-       if (phba->link_state == LPFC_LINK_DOWN) {
+       if (phba->link_state == LPFC_LINK_DOWN)
                return 0;
-       }
        spin_lock_irq(&phba->hbalock);
        if (phba->link_state > LPFC_LINK_DOWN) {
                phba->link_state = LPFC_LINK_DOWN;
@@ -629,11 +723,11 @@ lpfc_linkdown(struct lpfc_hba *phba)
        spin_unlock_irq(&phba->hbalock);
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
                        /* Issue a LINK DOWN event to all nodes */
                        lpfc_linkdown_port(vports[i]);
                }
-       lpfc_destroy_vport_work_array(vports);
+       lpfc_destroy_vport_work_array(phba, vports);
        /* Clean up any firmware default rpi's */
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
@@ -673,20 +767,21 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
        struct lpfc_nodelist *ndlp;
 
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+               if (!NLP_CHK_NODE_ACT(ndlp))
+                       continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
-
                if (ndlp->nlp_type & NLP_FABRIC) {
-                               /* On Linkup its safe to clean up the ndlp
-                                * from Fabric connections.
-                                */
+                       /* On Linkup its safe to clean up the ndlp
+                        * from Fabric connections.
+                        */
                        if (ndlp->nlp_DID != Fabric_DID)
                                lpfc_unreg_rpi(vport, ndlp);
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
-                               /* Fail outstanding IO now since device is
-                                * marked for PLOGI.
-                                */
+                       /* Fail outstanding IO now since device is
+                        * marked for PLOGI.
+                        */
                        lpfc_unreg_rpi(vport, ndlp);
                }
        }
@@ -738,9 +833,9 @@ lpfc_linkup(struct lpfc_hba *phba)
 
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
+               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
-       lpfc_destroy_vport_work_array(vports);
+       lpfc_destroy_vport_work_array(phba, vports);
        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                lpfc_issue_clear_la(phba, phba->pport);
 
@@ -788,21 +883,9 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
+       mempool_free(pmb, phba->mbox_mem_pool);
        return;
 
-       vport->num_disc_nodes = 0;
-       /* go thru NPR nodes and issue ELS PLOGIs */
-       if (vport->fc_npr_cnt)
-               lpfc_els_disc_plogi(vport);
-
-       if (!vport->num_disc_nodes) {
-               spin_lock_irq(shost->host_lock);
-               vport->fc_flag &= ~FC_NDISC_ACTIVE;
-               spin_unlock_irq(shost->host_lock);
-       }
-
-       vport->port_state = LPFC_VPORT_READY;
-
 out:
        /* Device Discovery completes */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -810,11 +893,9 @@ out:
        mempool_free(pmb, phba->mbox_mem_pool);
 
        spin_lock_irq(shost->host_lock);
-       vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
+       vport->fc_flag &= ~FC_ABORT_DISCOVERY;
        spin_unlock_irq(shost->host_lock);
 
-       del_timer_sync(&phba->fc_estabtmo);
-
        lpfc_can_disctmo(vport);
 
        /* turn on Link Attention interrupts */
@@ -964,6 +1045,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
        if (phba->fc_topology == TOPOLOGY_LOOP) {
                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
+               if (phba->cfg_enable_npiv)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1309 Link Up Event npiv not supported in loop "
+                               "topology\n");
                                /* Get Loop Map information */
                if (la->il)
                        vport->fc_flag |= FC_LBIT;
@@ -1056,14 +1141,10 @@ out:
 }
 
 static void
-lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+lpfc_enable_la(struct lpfc_hba *phba)
 {
        uint32_t control;
        struct lpfc_sli *psli = &phba->sli;
-
-       lpfc_linkdown(phba);
-
-       /* turn on Link Attention interrupts - no CLEAR_LA needed */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
@@ -1073,6 +1154,15 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
        spin_unlock_irq(&phba->hbalock);
 }
 
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
+       lpfc_linkdown(phba);
+       lpfc_enable_la(phba);
+       /* turn on Link Attention interrupts - no CLEAR_LA needed */
+}
+
+
 /*
  * This routine handles processing a READ_LA mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1088,6 +1178,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
+       /* Unblock ELS traffic */
+       phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        /* Check for error */
        if (mb->mbxStatus) {
                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -1118,11 +1210,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        }
 
        phba->fc_eventTag = la->eventTag;
+       if (la->mm)
+               phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+       else
+               phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
 
-       if (la->attType == AT_LINK_UP) {
+       if (la->attType == AT_LINK_UP && (!la->mm)) {
                phba->fc_stat.LinkUp++;
                if (phba->link_flag & LS_LOOPBACK_MODE) {
-                       lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                        "1306 Link Up Event in loop back mode "
                                        "x%x received Data: x%x x%x x%x x%x\n",
                                        la->eventTag, phba->fc_eventTag,
@@ -1131,21 +1227,67 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                } else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                        "1303 Link Up Event x%x received "
-                                       "Data: x%x x%x x%x x%x\n",
+                                       "Data: x%x x%x x%x x%x x%x x%x %d\n",
                                        la->eventTag, phba->fc_eventTag,
                                        la->granted_AL_PA, la->UlnkSpeed,
-                                       phba->alpa_map[0]);
+                                       phba->alpa_map[0],
+                                       la->mm, la->fa,
+                                       phba->wait_4_mlo_maint_flg);
                }
                lpfc_mbx_process_link_up(phba, la);
-       } else {
+       } else if (la->attType == AT_LINK_DOWN) {
                phba->fc_stat.LinkDown++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-                               "1305 Link Down Event x%x received "
+               if (phba->link_flag & LS_LOOPBACK_MODE) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1308 Link Down Event in loop back mode "
+                               "x%x received "
                                "Data: x%x x%x x%x\n",
                                la->eventTag, phba->fc_eventTag,
                                phba->pport->port_state, vport->fc_flag);
+               } else {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1305 Link Down Event x%x received "
+                               "Data: x%x x%x x%x x%x x%x\n",
+                               la->eventTag, phba->fc_eventTag,
+                               phba->pport->port_state, vport->fc_flag,
+                               la->mm, la->fa);
+               }
                lpfc_mbx_issue_link_down(phba);
        }
+       if (la->mm && la->attType == AT_LINK_UP) {
+               if (phba->link_state != LPFC_LINK_DOWN) {
+                       phba->fc_stat.LinkDown++;
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1312 Link Down Event x%x received "
+                               "Data: x%x x%x x%x\n",
+                               la->eventTag, phba->fc_eventTag,
+                               phba->pport->port_state, vport->fc_flag);
+                       lpfc_mbx_issue_link_down(phba);
+               } else
+                       lpfc_enable_la(phba);
+
+               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1310 Menlo Maint Mode Link up Event x%x rcvd "
+                               "Data: x%x x%x x%x\n",
+                               la->eventTag, phba->fc_eventTag,
+                               phba->pport->port_state, vport->fc_flag);
+               /*
+                * The cmnd that triggered this will be waiting for this
+                * signal.
+                */
+               /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
+               if (phba->wait_4_mlo_maint_flg) {
+                       phba->wait_4_mlo_maint_flg = 0;
+                       wake_up_interruptible(&phba->wait_4_mlo_m_q);
+               }
+       }
+
+       if (la->fa) {
+               if (la->mm)
+                       lpfc_issue_clear_la(phba, vport);
+               lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+                               "1311 fa %d\n", la->fa);
+       }
 
 lpfc_mbx_cmpl_read_la_free_mbuf:
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
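
The READ_LA completion now distinguishes four outcomes instead of two. A
compact summary of the branch logic above:

	/*
	 * AT_LINK_UP   && !la->mm -> normal link-up processing
	 * AT_LINK_DOWN            -> link-down processing (loop back mode
	 *                            gets its own 1308 message)
	 * AT_LINK_UP   &&  la->mm -> Menlo maintenance mode: force the link
	 *                            down if it was up, otherwise just
	 *                            re-enable link attention, then wake any
	 *                            MENLO_SET_MODE/MENLO_RESET waiter
	 * la->fa set              -> log it; issue CLEAR_LA as well when in
	 *                            maintenance mode
	 */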
@@ -1174,6 +1317,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
+       /* decrement the node reference count held for this callback
+        * function.
+        */
        lpfc_nlp_put(ndlp);
 
        return;
@@ -1205,7 +1351,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                scsi_host_put(shost);
 }
 
-void
+int
 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
 {
        struct lpfc_hba  *phba = vport->phba;
@@ -1214,7 +1360,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
-               return;
+               return 1;
 
        lpfc_unreg_vpi(phba, vport->vpi, mbox);
        mbox->vport = vport;
@@ -1225,7 +1371,9 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
                                 "1800 Could not issue unreg_vpi\n");
                mempool_free(mbox, phba->mbox_mem_pool);
                vport->unreg_vpi_cmpl = VPORT_ERROR;
+               return rc;
        }
+       return 0;
 }
 
 static void
@@ -1291,7 +1439,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);
-               lpfc_nlp_put(ndlp);
 
                if (phba->fc_topology == TOPOLOGY_LOOP) {
                        /* FLOGI failed, use loop map to make discovery list */
@@ -1299,6 +1446,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
                        /* Start discovery */
                        lpfc_disc_start(vport);
+                       /* Decrement the ndlp reference count after all
+                        * references to the ndlp are done.
+                        */
+                       lpfc_nlp_put(ndlp);
                        return;
                }
 
@@ -1306,6 +1457,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "0258 Register Fabric login error: 0x%x\n",
                                 mb->mbxStatus);
+               /* Decrement the ndlp reference count after all references
+                * to the ndlp are done.
+                */
+               lpfc_nlp_put(ndlp);
                return;
        }
 
@@ -1313,20 +1468,22 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
-       lpfc_nlp_put(ndlp);     /* Drop the reference from the mbox */
-
        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
                        for(i = 0;
-                           i < LPFC_MAX_VPORTS && vports[i] != NULL;
+                           i <= phba->max_vpi && vports[i] != NULL;
                            i++) {
                                if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
                                        continue;
+                               if (phba->fc_topology == TOPOLOGY_LOOP) {
+                                       lpfc_vport_set_state(vports[i],
+                                                       FC_VPORT_LINKDOWN);
+                                       continue;
+                               }
                                if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
                                        lpfc_initial_fdisc(vports[i]);
-                               else if (phba->sli3_options &
-                                               LPFC_SLI3_NPIV_ENABLED) {
+                               else {
                                        lpfc_vport_set_state(vports[i],
                                                FC_VPORT_NO_FABRIC_SUPP);
                                        lpfc_printf_vlog(vport, KERN_ERR,
@@ -1335,13 +1492,18 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                                                        "Fabric support\n");
                                }
                        }
-               lpfc_destroy_vport_work_array(vports);
+               lpfc_destroy_vport_work_array(phba, vports);
                lpfc_do_scr_ns_plogi(phba, vport);
        }
 
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
+
+       /* Drop the reference count from the mbox at the end, after
+        * all the current references to the ndlp have been done.
+        */
+       lpfc_nlp_put(ndlp);
        return;
 }
 
@@ -1361,6 +1523,9 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
        if (mb->mbxStatus) {
 out:
+               /* decrement the node reference count held for this
+                * callback function.
+                */
                lpfc_nlp_put(ndlp);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
@@ -1412,6 +1577,9 @@ out:
                goto out;
        }
 
+       /* decrement the node reference count held for this
+        * callback function.
+        */
        lpfc_nlp_put(ndlp);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
@@ -1443,9 +1611,8 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
         * registered the port.
         */
        if (ndlp->rport && ndlp->rport->dd_data &&
-           ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
+           ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
                lpfc_nlp_put(ndlp);
-       }
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport add:       did:x%x flg:x%x type x%x",
@@ -1562,6 +1729,22 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                 */
                lpfc_register_remote_port(vport, ndlp);
        }
+       if ((new_state == NLP_STE_MAPPED_NODE) &&
+               (vport->stat_data_enabled)) {
+               /*
+                * A new target is discovered; if there is no buffer for
+                * statistical data collection, allocate one.
+                */
+               ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+                                        sizeof(struct lpfc_scsicmd_bkt),
+                                        GFP_KERNEL);
+
+               if (!ndlp->lat_data)
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+                               "0286 lpfc_nlp_state_cleanup failed to "
+                               "allocate statistical data buffer DID "
+                               "0x%x\n", ndlp->nlp_DID);
+       }
        /*
         * if we added to Mapped list, but the remote port
         * registration failed or assigned a target id outside
@@ -1619,7 +1802,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                ndlp->nlp_DID, old_state, state);
 
        if (old_state == NLP_STE_NPR_NODE &&
-           (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
            state != NLP_STE_NPR_NODE)
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (old_state == NLP_STE_UNMAPPED_NODE) {
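
This hunk, and several later ones, drop the caller-side NLP_DELAY_TMO test
before calling lpfc_cancel_retry_delay_tmo(). That is only safe if the test
moved into the callee; its assumed shape after this series (the cancel body
itself is elided):

	void
	lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport,
				    struct lpfc_nodelist *nlp)
	{
		if (!(nlp->nlp_flag & NLP_DELAY_TMO))
			return;		/* no delayed retry pending */
		/* ... existing timer-cancel logic unchanged ... */
	}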
@@ -1640,32 +1822,133 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 }
 
 void
+lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+       if (list_empty(&ndlp->nlp_listp)) {
+               spin_lock_irq(shost->host_lock);
+               list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+               spin_unlock_irq(shost->host_lock);
+       }
+}
+
+void
 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-       if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-               lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        spin_lock_irq(shost->host_lock);
        list_del_init(&ndlp->nlp_listp);
        spin_unlock_irq(shost->host_lock);
        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
-                              NLP_STE_UNUSED_NODE);
+                               NLP_STE_UNUSED_NODE);
+}
+
+static void
+lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+               lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+       lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+                               NLP_STE_UNUSED_NODE);
+}
+
+/**
+ * lpfc_initialize_node: Initialize all fields of node object.
+ * @vport: Pointer to Virtual Port object.
+ * @ndlp: Pointer to FC node object.
+ * @did: FC_ID of the node.
+ *     This function is called whenever a node object needs to
+ * be initialized. It initializes all the fields of the node
+ * object.
+ **/
+static inline void
+lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+       uint32_t did)
+{
+       INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+       INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+       init_timer(&ndlp->nlp_delayfunc);
+       ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+       ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+       ndlp->nlp_DID = did;
+       ndlp->vport = vport;
+       ndlp->nlp_sid = NLP_NO_SID;
+       kref_init(&ndlp->kref);
+       NLP_INT_NODE_ACT(ndlp);
+       atomic_set(&ndlp->cmd_pending, 0);
+       ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+}
+
+struct lpfc_nodelist *
+lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                int state)
+{
+       struct lpfc_hba *phba = vport->phba;
+       uint32_t did;
+       unsigned long flags;
+
+       if (!ndlp)
+               return NULL;
+
+       spin_lock_irqsave(&phba->ndlp_lock, flags);
+       /* The ndlp should not be in memory free mode */
+       if (NLP_CHK_FREE_REQ(ndlp)) {
+               spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+                               "0277 lpfc_enable_node: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               return NULL;
+       }
+       /* The ndlp should not already be in active mode */
+       if (NLP_CHK_NODE_ACT(ndlp)) {
+               spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+                               "0278 lpfc_enable_node: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               return NULL;
+       }
+
+       /* Keep the original DID */
+       did = ndlp->nlp_DID;
+
+       /* re-initialize ndlp except of ndlp linked list pointer */
+       memset((((char *)ndlp) + sizeof (struct list_head)), 0,
+               sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
+       lpfc_initialize_node(vport, ndlp, did);
+
+       spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+
+       if (state != NLP_STE_UNUSED_NODE)
+               lpfc_nlp_set_state(vport, ndlp, state);
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+               "node enable:       did:x%x",
+               ndlp->nlp_DID, 0, 0);
+       return ndlp;
 }
 
 void
 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
        /*
-        * Use of lpfc_drop_node and UNUSED list. lpfc_drop_node should
+        * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
         * be used if we wish to issue the "last" lpfc_nlp_put() to remove
-        * the ndlp from the vport.  The ndlp resides on the UNUSED list
-        * until ALL other outstanding threads have completed. Thus, if a
-        * ndlp is on the UNUSED list already, we should never do another
-        * lpfc_drop_node() on it.
+        * the ndlp from the vport. The ndlp stays marked UNUSED on the list
+        * until ALL other outstanding threads have completed. We check
+        * that the ndlp is not already in the UNUSED state before we proceed.
         */
+       if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+               return;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
        lpfc_nlp_put(ndlp);
        return;
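
lpfc_enable_node() is the counterpart of the new lpfc_disable_node(): a node
object can now be parked inactive (NLP_CHK_NODE_ACT() fails) instead of
freed, and revived later with its DID preserved. The caller pattern, as used
by lpfc_setup_disc_node() further down:

	ndlp = lpfc_findnode_did(vport, did);
	if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) {
		/* found, but inactive: re-initialize and re-activate it */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;	/* node was being freed */
	}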
@@ -1682,10 +1964,10 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
        uint32_t tmo;
 
        if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
-               /* For FAN, timeout should be greater then edtov */
+               /* For FAN, timeout should be greater than edtov */
                tmo = (((phba->fc_edtov + 999) / 1000) + 1);
        } else {
-               /* Normal discovery timeout should be > then ELS/CT timeout
+               /* Normal discovery timeout should be greater than ELS/CT timeout
                 * FC spec states we need 3 * ratov for CT requests
                 */
                tmo = ((phba->fc_ratov * 3) + 3);
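
For example, with the Fibre Channel default R_A_TOV of 10 seconds this gives
tmo = 10 * 3 + 3 = 33 seconds; in the FAN case fc_edtov, kept in
milliseconds, is rounded up to whole seconds plus one.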
@@ -1902,7 +2184,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
                lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+               mbox->context1 = NULL;
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
                if (rc == MBX_NOT_FINISHED) {
                        mempool_free(mbox, phba->mbox_mem_pool);
                }
@@ -1921,7 +2204,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
                lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+               mbox->context1 = NULL;
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
                if (rc == MBX_NOT_FINISHED) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
                                         "1815 Could not issue "
@@ -1949,7 +2233,21 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                         "Data: x%x x%x x%x\n",
                         ndlp->nlp_DID, ndlp->nlp_flag,
                         ndlp->nlp_state, ndlp->nlp_rpi);
-       lpfc_dequeue_node(vport, ndlp);
+       if (NLP_CHK_FREE_REQ(ndlp)) {
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+                               "0280 lpfc_cleanup_node: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               lpfc_dequeue_node(vport, ndlp);
+       } else {
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+                               "0281 lpfc_cleanup_node: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               lpfc_disable_node(vport, ndlp);
+       }
 
        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
        if ((mb = phba->sli.mbox_active)) {
@@ -1971,12 +2269,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                        }
                        list_del(&mb->list);
                        mempool_free(mb, phba->mbox_mem_pool);
-                       lpfc_nlp_put(ndlp);
+                       /* We shall not invoke lpfc_nlp_put to decrement
+                        * the ndlp reference count, as we are in the process
+                        * of lpfc_nlp_release.
+                        */
                }
        }
        spin_unlock_irq(&phba->hbalock);
 
-       lpfc_els_abort(phba,ndlp);
+       lpfc_els_abort(phba, ndlp);
+
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -1984,10 +2286,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        ndlp->nlp_last_elscmd = 0;
        del_timer_sync(&ndlp->nlp_delayfunc);
 
-       if (!list_empty(&ndlp->els_retry_evt.evt_listp))
-               list_del_init(&ndlp->els_retry_evt.evt_listp);
-       if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
-               list_del_init(&ndlp->dev_loss_evt.evt_listp);
+       list_del_init(&ndlp->els_retry_evt.evt_listp);
+       list_del_init(&ndlp->dev_loss_evt.evt_listp);
 
        lpfc_unreg_rpi(vport, ndlp);
 
@@ -2007,10 +2307,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        LPFC_MBOXQ_t *mbox;
        int rc;
 
-       if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-               lpfc_cancel_retry_delay_tmo(vport, ndlp);
-       }
-
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
                /* For this case we need to cleanup the default rpi
                 * allocated by the firmware.
@@ -2026,7 +2323,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                                mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
                                mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
                                mbox->vport = vport;
-                               mbox->context2 = 0;
+                               mbox->context2 = NULL;
                                rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                                if (rc == MBX_NOT_FINISHED) {
                                        mempool_free(mbox, phba->mbox_mem_pool);
@@ -2034,7 +2331,6 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                        }
                }
        }
-
        lpfc_cleanup_node(vport, ndlp);
 
        /*
@@ -2058,10 +2354,6 @@ lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (did == Bcast_DID)
                return 0;
 
-       if (ndlp->nlp_DID == 0) {
-               return 0;
-       }
-
        /* First check for Direct match */
        if (ndlp->nlp_DID == did)
                return 1;
@@ -2159,8 +2451,18 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
                return ndlp;
+       } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+               ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
+               if (!ndlp)
+                       return NULL;
+               spin_lock_irq(shost->host_lock);
+               ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+               spin_unlock_irq(shost->host_lock);
+               return ndlp;
        }
-       if (vport->fc_flag & FC_RSCN_MODE) {
+
+       if ((vport->fc_flag & FC_RSCN_MODE) &&
+           !(vport->fc_flag & FC_NDISC_ACTIVE)) {
                if (lpfc_rscn_payload_check(vport, did)) {
                        /* If we've already recieved a PLOGI from this NPort
                         * we don't need to try to discover it again.
@@ -2175,8 +2477,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
                        /* Since this node is marked for discovery,
                         * delay timeout is not needed.
                         */
-                       if (ndlp->nlp_flag & NLP_DELAY_TMO)
-                               lpfc_cancel_retry_delay_tmo(vport, ndlp);
+                       lpfc_cancel_retry_delay_tmo(vport, ndlp);
                } else
                        ndlp = NULL;
        } else {
@@ -2340,6 +2641,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
         * continue discovery.
         */
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+           !(vport->fc_flag & FC_PT2PT) &&
            !(vport->fc_flag & FC_RSCN_MODE)) {
                lpfc_issue_reg_vpi(phba, vport);
                return;
@@ -2462,6 +2764,8 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
        if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
                                         nlp_listp) {
+                       if (!NLP_CHK_NODE_ACT(ndlp))
+                               continue;
                        if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
                            ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
                                lpfc_free_tx(phba, ndlp);
@@ -2498,21 +2802,20 @@ lpfc_disc_timeout(unsigned long ptr)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
        struct lpfc_hba   *phba = vport->phba;
+       uint32_t tmo_posted;
        unsigned long flags = 0;
 
        if (unlikely(!phba))
                return;
 
-       if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
-               spin_lock_irqsave(&vport->work_port_lock, flags);
+       spin_lock_irqsave(&vport->work_port_lock, flags);
+       tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+       if (!tmo_posted)
                vport->work_port_events |= WORKER_DISC_TMO;
-               spin_unlock_irqrestore(&vport->work_port_lock, flags);
+       spin_unlock_irqrestore(&vport->work_port_lock, flags);
 
-               spin_lock_irqsave(&phba->hbalock, flags);
-               if (phba->work_wait)
-                       lpfc_worker_wake_up(phba);
-               spin_unlock_irqrestore(&phba->hbalock, flags);
-       }
+       if (!tmo_posted)
+               lpfc_worker_wake_up(phba);
        return;
 }
 
@@ -2549,6 +2852,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
                /* Start discovery by sending FLOGI, clean up old rpis */
                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
                                         nlp_listp) {
+                       if (!NLP_CHK_NODE_ACT(ndlp))
+                               continue;
                        if (ndlp->nlp_state != NLP_STE_NPR_NODE)
                                continue;
                        if (ndlp->nlp_type & NLP_FABRIC) {
@@ -2595,7 +2900,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
                                 "NameServer login\n");
                /* Next look for NameServer ndlp */
                ndlp = lpfc_findnode_did(vport, NameServer_DID);
-               if (ndlp)
+               if (ndlp && NLP_CHK_NODE_ACT(ndlp))
                        lpfc_els_abort(phba, ndlp);
 
                /* ReStart discovery */
@@ -2689,7 +2994,7 @@ restart_disc:
 
        default:
                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
-                                "0229 Unexpected discovery timeout, "
+                                "0273 Unexpected discovery timeout, "
                                 "vport State x%x\n", vport->port_state);
                break;
        }
@@ -2702,12 +3007,14 @@ restart_disc:
                clrlaerr = 1;
                break;
 
+       case LPFC_LINK_UP:
+               lpfc_issue_clear_la(phba, vport);
+               /* Drop thru */
        case LPFC_LINK_UNKNOWN:
        case LPFC_WARM_START:
        case LPFC_INIT_START:
        case LPFC_INIT_MBX_CMDS:
        case LPFC_LINK_DOWN:
-       case LPFC_LINK_UP:
        case LPFC_HBA_ERROR:
                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                 "0230 Unexpected timeout, hba link "
@@ -2761,7 +3068,9 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        else
                mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
 
-                               /* Mailbox took a reference to the node */
+       /* decrement the node reference count held for this callback
+        * function.
+        */
        lpfc_nlp_put(ndlp);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
@@ -2797,24 +3106,6 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
        return NULL;
 }
 
-#if 0
-/*
- * Search node lists for a remote port matching filter criteria
- * Caller needs to hold host_lock before calling this routine.
- */
-struct lpfc_nodelist *
-lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
-{
-       struct Scsi_Host     *shost = lpfc_shost_from_vport(vport);
-       struct lpfc_nodelist *ndlp;
-
-       spin_lock_irq(shost->host_lock);
-       ndlp = __lpfc_find_node(vport, filter, param);
-       spin_unlock_irq(shost->host_lock);
-       return ndlp;
-}
-#endif  /*  0  */
-
 /*
  * This routine looks up the ndlp lists for the given RPI. If rpi found it
  * returns the node list element pointer else return NULL.
@@ -2825,20 +3116,6 @@ __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
        return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
 }
 
-#if 0
-struct lpfc_nodelist *
-lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
-{
-       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-       struct lpfc_nodelist *ndlp;
-
-       spin_lock_irq(shost->host_lock);
-       ndlp = __lpfc_findnode_rpi(vport, rpi);
-       spin_unlock_irq(shost->host_lock);
-       return ndlp;
-}
-#endif  /*  0  */
-
 /*
  * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
  * found, it returns the node list element pointer; else it returns NULL.
@@ -2860,16 +3137,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
              uint32_t did)
 {
        memset(ndlp, 0, sizeof (struct lpfc_nodelist));
-       INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
-       INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
-       init_timer(&ndlp->nlp_delayfunc);
-       ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
-       ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
-       ndlp->nlp_DID = did;
-       ndlp->vport = vport;
-       ndlp->nlp_sid = NLP_NO_SID;
+
+       lpfc_initialize_node(vport, ndlp, did);
        INIT_LIST_HEAD(&ndlp->nlp_listp);
-       kref_init(&ndlp->kref);
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
                "node init:       did:x%x",
@@ -2884,6 +3154,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 static void
 lpfc_nlp_release(struct kref *kref)
 {
+       struct lpfc_hba *phba;
+       unsigned long flags;
        struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
                                                  kref);
 
@@ -2891,8 +3163,26 @@ lpfc_nlp_release(struct kref *kref)
                "node release:    did:x%x flg:x%x type:x%x",
                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
 
+       lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+                       "0279 lpfc_nlp_release: ndlp:x%p "
+                       "usgmap:x%x refcnt:%d\n",
+                       (void *)ndlp, ndlp->nlp_usg_map,
+                       atomic_read(&ndlp->kref.refcount));
+
+       /* Remove the ndlp from further discovery action. */
        lpfc_nlp_remove(ndlp->vport, ndlp);
-       mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
+
+       /* clear the ndlp active flag for all release cases */
+       phba = ndlp->vport->phba;
+       spin_lock_irqsave(&phba->ndlp_lock, flags);
+       NLP_CLR_NODE_ACT(ndlp);
+       spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+
+       /* free ndlp memory for final ndlp release */
+       if (NLP_CHK_FREE_REQ(ndlp)) {
+               kfree(ndlp->lat_data);
+               mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
+       }
 }
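/* The release path above depends on the ndlp usage map guarded by
 * phba->ndlp_lock. A plausible sketch of those accessor macros; the
 * exact bit values and names (e.g. NLP_USG_NODE_ACT_BIT) are
 * assumptions here:
 */
#define NLP_USG_NODE_ACT_BIT    0x1     /* node is active */
#define NLP_USG_IACT_REQ_BIT    0x2     /* node inactivation requested */
#define NLP_USG_FREE_REQ_BIT    0x4     /* node memory free requested */
#define NLP_USG_FREE_ACK_BIT    0x8     /* node memory free acknowledged */

#define NLP_CHK_NODE_ACT(ndlp)  (((ndlp)->nlp_usg_map & NLP_USG_NODE_ACT_BIT) \
                                 && !((ndlp)->nlp_usg_map & NLP_USG_FREE_ACK_BIT))
#define NLP_CLR_NODE_ACT(ndlp)  ((ndlp)->nlp_usg_map &= ~NLP_USG_NODE_ACT_BIT)
#define NLP_CHK_IACT_REQ(ndlp)  ((ndlp)->nlp_usg_map & NLP_USG_IACT_REQ_BIT)
#define NLP_SET_IACT_REQ(ndlp)  ((ndlp)->nlp_usg_map |= NLP_USG_IACT_REQ_BIT)
#define NLP_CHK_FREE_REQ(ndlp)  ((ndlp)->nlp_usg_map & NLP_USG_FREE_REQ_BIT)
#define NLP_CHK_FREE_ACK(ndlp)  ((ndlp)->nlp_usg_map & NLP_USG_FREE_ACK_BIT)
#define NLP_SET_FREE_ACK(ndlp)  ((ndlp)->nlp_usg_map |= NLP_USG_FREE_ACK_BIT)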
 
 /* This routine bumps the reference count for a ndlp structure to ensure
@@ -2902,37 +3192,108 @@ lpfc_nlp_release(struct kref *kref)
 struct lpfc_nodelist *
 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
 {
+       struct lpfc_hba *phba;
+       unsigned long flags;
+
        if (ndlp) {
                lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
                        "node get:        did:x%x flg:x%x refcnt:x%x",
                        ndlp->nlp_DID, ndlp->nlp_flag,
                        atomic_read(&ndlp->kref.refcount));
-               kref_get(&ndlp->kref);
+               /* Check the ndlp usage map to avoid incrementing the
+                * reference count of an ndlp that is in the process
+                * of being released.
+                */
+               phba = ndlp->vport->phba;
+               spin_lock_irqsave(&phba->ndlp_lock, flags);
+               if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
+                       spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+                       lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+                               "0276 lpfc_nlp_get: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+                       return NULL;
+               } else
+                       kref_get(&ndlp->kref);
+               spin_unlock_irqrestore(&phba->ndlp_lock, flags);
        }
        return ndlp;
 }
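/* Illustrative caller pattern for the NULL-returning lpfc_nlp_get()
 * above. A sketch only; queue_node_work is a hypothetical helper. The
 * point is to take a reference before publishing the node to another
 * context, and to bail out if the node is already on its way out.
 */
static int queue_node_work(struct lpfc_nodelist *ndlp)
{
        struct lpfc_nodelist *held = lpfc_nlp_get(ndlp);

        if (!held)                      /* inactive or being released */
                return -ENODEV;
        /*
         * ... hand "held" to the deferred context here; that context
         * must call lpfc_nlp_put(held) when it is done with the node.
         */
        return 0;
}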
 
-
 /* This routine decrements the reference count for a ndlp structure. If the
- * count goes to 0, this indicates the the associated nodelist should be freed.
+ * count goes to 0, this indicates that the associated nodelist should be
+ * freed. Returning 1 indicates the ndlp resource has been released;
+ * returning 0 indicates the ndlp resource has not yet been
+ * released.
  */
 int
 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
 {
-       if (ndlp) {
-               lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
-               "node put:        did:x%x flg:x%x refcnt:x%x",
-                       ndlp->nlp_DID, ndlp->nlp_flag,
-                       atomic_read(&ndlp->kref.refcount));
+       struct lpfc_hba *phba;
+       unsigned long flags;
+
+       if (!ndlp)
+               return 1;
+
+       lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+       "node put:        did:x%x flg:x%x refcnt:x%x",
+               ndlp->nlp_DID, ndlp->nlp_flag,
+               atomic_read(&ndlp->kref.refcount));
+       phba = ndlp->vport->phba;
+       spin_lock_irqsave(&phba->ndlp_lock, flags);
+       /* Check the ndlp memory free acknowledge flag to avoid the
+        * possible race where kref_put is invoked again after a
+        * previous put has already freed the ndlp memory.
+        */
+       if (NLP_CHK_FREE_ACK(ndlp)) {
+               spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+               lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+                               "0274 lpfc_nlp_put: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               return 1;
+       }
+       /* Check the ndlp inactivate request flag to avoid the possible
+        * race where kref_put is invoked again while the ndlp is
+        * already being inactivated.
+        */
+       if (NLP_CHK_IACT_REQ(ndlp)) {
+               spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+               lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+                               "0275 lpfc_nlp_put: ndlp:x%p "
+                               "usgmap:x%x refcnt:%d\n",
+                               (void *)ndlp, ndlp->nlp_usg_map,
+                               atomic_read(&ndlp->kref.refcount));
+               return 1;
        }
-       return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
+       /* For the last put, mark the ndlp usage flags to make sure
+        * no other kref_get or kref_put on the same ndlp can
+        * interleave with this final kref_put on the ndlp.
+        */
+       if (atomic_read(&ndlp->kref.refcount) == 1) {
+               /* Indicate ndlp is put to inactive state. */
+               NLP_SET_IACT_REQ(ndlp);
+               /* Acknowledge ndlp memory free has been seen. */
+               if (NLP_CHK_FREE_REQ(ndlp))
+                       NLP_SET_FREE_ACK(ndlp);
+       }
+       spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+       /* Note: when kref_put is called on a reference count of 1, it
+        * invokes the release callback and returns 1, but it leaves
+        * the count at 1 rather than performing the final decrement.
+        * Otherwise, it actually decrements the reference count and
+        * returns 0.
+        */
+       return kref_put(&ndlp->kref, lpfc_nlp_release);
 }
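/* A minimal, self-contained illustration of the kref_put() contract
 * noted above (generic kernel refcounting, not lpfc code; demo_obj,
 * demo_alloc and demo_release are hypothetical names):
 */
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
        struct kref kref;               /* embedded reference counter */
};

static void demo_release(struct kref *kref)
{
        /* runs exactly once, from the put that drops the last reference */
        kfree(container_of(kref, struct demo_obj, kref));
}

static struct demo_obj *demo_alloc(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                kref_init(&obj->kref);  /* refcount starts at 1 */
        return obj;
}

static int demo_put(struct demo_obj *obj)
{
        /* returns 1 (and frees obj) on the final put, 0 otherwise */
        return kref_put(&obj->kref, demo_release);
}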
 
 /* This routine frees the specified nodelist if it is not in use
- * by any other discovery thread. This routine returns 1 if the ndlp
- * is not being used by anyone and has been freed. A return value of
- * 0 indicates it is being used by another discovery thread and the
- * refcount is left unchanged.
+ * by any other discovery thread. This routine returns 1 if the
+ * ndlp has been freed. A return value of 0 indicates the ndlp
+ * has not yet been released.
  */
 int
 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
@@ -2941,11 +3302,8 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
                "node not used:   did:x%x flg:x%x refcnt:x%x",
                ndlp->nlp_DID, ndlp->nlp_flag,
                atomic_read(&ndlp->kref.refcount));
-
-       if (atomic_read(&ndlp->kref.refcount) == 1) {
-               lpfc_nlp_put(ndlp);
-               return 1;
-       }
+       if (atomic_read(&ndlp->kref.refcount) == 1)
+               if (lpfc_nlp_put(ndlp))
+                       return 1;
        return 0;
 }
-
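/* A hypothetical discovery-path use of lpfc_nlp_not_used(): free a node
 * that turned out to be unneeded, or park it in NPR state when another
 * thread still holds a reference (sketch only; the helper name
 * discovery_done_with_node is invented for illustration):
 */
static void discovery_done_with_node(struct lpfc_vport *vport,
                                     struct lpfc_nodelist *ndlp)
{
        if (!lpfc_nlp_not_used(ndlp))
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}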