/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_hba *);
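
/*
 * Called by the FC transport to terminate all outstanding I/O for a
 * remote port. If the rport's node is still mapped to a SCSI target,
 * abort any FCP IOCBs queued to that target.
 */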
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->nlp_phba;

	spin_lock_irq(phba->host->host_lock);
	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	spin_unlock_irq(phba->host->host_lock);

	return;
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	uint8_t *name;
	int warn_on = 0;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " for rport in dev_loss_tmo_callbk x%x\n",
			       rport->port_id);
		return;
	}

	name = (uint8_t *)&ndlp->nlp_portname;
	phba = ndlp->nlp_phba;

	spin_lock_irq(phba->host->host_lock);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	if (phba->fc_flag & FC_UNLOADING)
		warn_on = 0;

	spin_unlock_irq(phba->host->host_lock);

	if (warn_on) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0203 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
				"%d:0204 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(phba->fc_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC))
		lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
	else {
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}

	return;
}

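/*
 * Drain the HBA's deferred work list. Each queued lpfc_work_evt is
 * dequeued with the host lock held and handled with the lock dropped
 * (ELS retry, online/offline transitions, warm start, kill). Events
 * that carry a completion are completed so the posting thread can
 * continue; events whose storage lives elsewhere (ELS retries) are
 * not freed here.
 */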
static void
lpfc_work_list_done(struct lpfc_hba * phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;

	spin_lock_irq(phba->host->host_lock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(phba->host->host_lock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ONLINE:
			if (phba->hba_state < LPFC_LINK_DOWN)
				*(int *)(evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->hba_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->stopped) ? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);
}

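/*
 * Main body of the worker thread: snapshot and clear the pending host
 * attention bits and timeout events, dispatch the matching handlers
 * (error/mailbox/link attention, discovery/ELS/mailbox/FDMI timeouts),
 * service deferred slow-path ring events, then run the work list.
 */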
static void
lpfc_work_done(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	int i;
	uint32_t ha_copy;
	uint32_t control;
	uint32_t work_hba_events;

	spin_lock_irq(phba->host->host_lock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	work_hba_events = phba->work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	if (work_hba_events & WORKER_DISC_TMO)
		lpfc_disc_timeout_handler(phba);

	if (work_hba_events & WORKER_ELS_TMO)
		lpfc_els_timeout_handler(phba);

	if (work_hba_events & WORKER_MBOX_TMO)
		lpfc_mbox_timeout_handler(phba);

	if (work_hba_events & WORKER_FDMI_TMO)
		lpfc_fdmi_tmo_handler(phba);

	spin_lock_irq(phba->host->host_lock);
	phba->work_hba_events &= ~work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
		pring = &phba->sli.ring[i];
		if ((ha_copy & HA_RXATT)
		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
			if (pring->flag & LPFC_STOP_IOCB_MASK) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
			} else {
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(ha_copy &
								 HA_RXMASK));
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			}
			/*
			 * Turn on Ring interrupts
			 */
			spin_lock_irq(phba->host->host_lock);
			control = readl(phba->HCregaddr);
			control |= (HC_R0INT_ENA << i);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			spin_unlock_irq(phba->host->host_lock);
		}
	}

	lpfc_work_list_done(phba);
}

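/*
 * Wait-queue condition for the worker thread: returns 1 when there is
 * pending host attention status, a pending timeout event, a queued
 * work event, or a stop request for the kthread.
 */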
static int
check_work_wait_done(struct lpfc_hba *phba) {

	spin_lock_irq(phba->host->host_lock);
	if (phba->work_ha ||
	    phba->work_hba_events ||
	    (!list_empty(&phba->work_list)) ||
	    kthread_should_stop()) {
		spin_unlock_irq(phba->host->host_lock);
		return 1;
	} else {
		spin_unlock_irq(phba->host->host_lock);
		return 0;
	}
}

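/*
 * Entry point of the lpfc worker kthread. Sleeps until
 * check_work_wait_done() reports pending work, then calls
 * lpfc_work_done() until the thread is asked to stop.
 */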
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
					      check_work_wait_done(phba));
		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

	}
	phba->work_wait = NULL;
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irq(phba->host->host_lock);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irq(phba->host->host_lock);

	return 1;
}

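/*
 * Example usage (an illustrative sketch, not a verbatim call site): a
 * process-context caller, such as a sysfs attribute handler, brings
 * the HBA online by posting an event and sleeping on a completion:
 *
 *	struct completion online_compl;
 *	int status;
 *
 *	init_completion(&online_compl);
 *	lpfc_workq_post_event(phba, &status, &online_compl,
 *			      LPFC_EVT_ONLINE);
 *	wait_for_completion(&online_compl);
 *
 * This mirrors the pattern used by the attribute handlers in
 * lpfc_attr.c.
 */

/*
 * Bring the link logically down: post an FCH_EVT_LINKDOWN event to the
 * transport, unregister firmware default RPIs, flush RSCN and ELS
 * activity, issue a LINK DOWN event to every node list, and reset
 * point-to-point state.
 */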
int
lpfc_linkdown(struct lpfc_hba * phba)
{
	struct lpfc_sli       *psli;
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct list_head *listp, *node_list[7];
	LPFC_MBOXQ_t     *mb;
	int               rc, i;

	psli = &phba->sli;
	/* sysfs or selective reset may call this routine to clean up */
	if (phba->hba_state >= LPFC_LINK_DOWN) {
		if (phba->hba_state == LPFC_LINK_DOWN)
			return 0;

		spin_lock_irq(phba->host->host_lock);
		phba->hba_state = LPFC_LINK_DOWN;
		spin_unlock_irq(phba->host->host_lock);
	}

	fc_host_post_event(phba->host, fc_get_event_number(),
			   FCH_EVT_LINKDOWN, 0);

	/* Clean up any firmware default rpi's */
	if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
		lpfc_unreg_did(phba, 0xffffffff, mb);
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(phba);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	/* Issue a LINK DOWN event to all nodes */
	node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_nlpunmap_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_reglogin_list;
	node_list[5] = &phba->fc_adisc_list;
	node_list[6] = &phba->fc_plogi_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {

			rc = lpfc_disc_state_machine(phba, ndlp, NULL,
						     NLP_EVT_DEVICE_RECOVERY);

		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				 nlp_listp)
		lpfc_drop_node(phba, ndlp);

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->fc_flag & FC_PT2PT) {
		phba->fc_myDID = 0;
		if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox
			    (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(phba->host->host_lock);
	}
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LBIT;
	spin_unlock_irq(phba->host->host_lock);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(phba);

	/* Must process IOCBs on all rings to handle ABORTed I/Os */
	return 0;
}

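/*
 * Handle the transition to link up: post an FCH_EVT_LINKUP event,
 * reset discovery-related flags, and walk every node list so stale
 * fabric nodes are recycled and nodes marked for PLOGI have their
 * RPIs unregistered.
 */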
static int
lpfc_linkup(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct list_head *listp, *node_list[7];
	int i;

	fc_host_post_event(phba->host, fc_get_event_number(),
			   FCH_EVT_LINKUP, 0);

	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_UP;
	phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			   FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	phba->fc_flag |= FC_NDISC_ACTIVE;
	phba->fc_ns_retry = 0;
	spin_unlock_irq(phba->host->host_lock);


	node_list[0] = &phba->fc_plogi_list;
	node_list[1] = &phba->fc_adisc_list;
	node_list[2] = &phba->fc_reglogin_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_nlpunmap_list;
	node_list[5] = &phba->fc_nlpmap_list;
	node_list[6] = &phba->fc_npr_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
			if (phba->fc_flag & FC_LBIT) {
				if (ndlp->nlp_type & NLP_FABRIC) {
					/* On Linkup it's safe to clean up the
					 * ndlp from Fabric connections.
					 */
					lpfc_nlp_set_state(phba, ndlp,
							   NLP_STE_UNUSED_NODE);
				} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
					/* Fail outstanding IO now since device
					 * is marked for PLOGI.
					 */
					lpfc_unreg_rpi(phba, ndlp);
				}
			}
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				 nlp_listp)
		lpfc_drop_node(phba, ndlp);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	uint32_t control;

	psli = &phba->sli;
	mb = &pmb->mb;
	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0320 CLEAR_LA mbxStatus error x%x hba "
				"state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (phba->fc_flag & FC_ABORT_DISCOVERY)
		goto out;

	phba->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (phba->fc_npr_cnt) {
		lpfc_els_disc_plogi(phba);
	}

	if (!phba->num_disc_nodes) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);
	}

	phba->hba_state = LPFC_HBA_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0225 Device Discovery completes\n",
			phba->brd_no);

	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_ABORT_DISCOVERY;
	if (phba->fc_flag & FC_ESTABLISH_LINK) {
		phba->fc_flag &= ~FC_ESTABLISH_LINK;
	}
	spin_unlock_irq(phba->host->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(phba);

	/* turn on Link Attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;
}

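/*
 * Completion handler for the CONFIG_LINK mailbox command. On success,
 * either wait for FAN (public loop without the login-required bit) or
 * start discovery by sending a FLOGI; on mailbox error, take the link
 * down and fall back to CLEAR_LA.
 */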
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	if (pmb->mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    phba->fc_flag & FC_PUBLIC_LOOP &&
	    !(phba->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  hba_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(phba);
		return;
	}

	/* Start discovery by sending a FLOGI. hba_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	phba->hba_state = LPFC_FLOGI;
	lpfc_set_disctmo(phba);
	lpfc_initial_flogi(phba);
	return;

out:
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"%d:0306 CONFIG_LINK mbxStatus error x%x "
			"HBA state x%x\n",
			phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);

	lpfc_linkdown(phba);

	phba->hba_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"%d:0200 CONFIG_LINK bad hba state x%x\n",
			phba->brd_no, phba->hba_state);

	lpfc_clear_la(phba, pmb);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
	rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}
	return;
}

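/*
 * Completion handler for the READ_SPARAM mailbox command. Copies the
 * service parameters into the phba, applies any soft WWNN/WWPN
 * overrides, and derives the node and port names. On mailbox error the
 * link is taken down and CLEAR_LA issued.
 */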
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0319 READ_SPARAM mbxStatus error x%x "
				"hba state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		lpfc_linkdown(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	memcpy((uint8_t *) &phba->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
	memcpy((uint8_t *) &phba->fc_nodename,
	       (uint8_t *) &phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy((uint8_t *) &phba->fc_portname,
	       (uint8_t *) &phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	if (phba->hba_state != LPFC_CLEAR_LA) {
		lpfc_clear_la(phba, pmb);
		pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(pmb, phba->mbox_mem_pool);
			lpfc_disc_flush_list(phba);
			psli->ring[(psli->extra_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->fcp_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->next_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			phba->hba_state = LPFC_HBA_READY;
		}
	} else {
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	return;
}

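/*
 * Process a link-up attention: record link speed and topology, pick up
 * the loop map (or the preferred DID on a non-loop topology), then kick
 * off READ_SPARAM and CONFIG_LINK mailbox commands to continue bringing
 * the port up.
 */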
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	int i;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
	struct lpfc_dmabuf *mp;
	int rc;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(phba->host->host_lock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		/* Get Loop Map information */

		if (la->il)
			phba->fc_flag |= FC_LBIT;

		phba->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"%d:1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							phba->brd_no,
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		phba->fc_myDID = phba->fc_pref_DID;
		phba->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox);
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			if (cfglink_mbox)
				mempool_free(cfglink_mbox,
					     phba->mbox_mem_pool);
			return;
		}
	}

	if (cfglink_mbox) {
		phba->hba_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED)
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
	}
}

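/*
 * Take the link down and re-enable link attention interrupts; no
 * CLEAR_LA is needed in this path.
 */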
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);
}

/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_LINK_EVENT,
				"%d:1307 READ_LA mbox error x%x state x%x\n",
				phba->brd_no,
				mb->mbxStatus, phba->hba_state);
		lpfc_mbx_issue_link_down(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) &pmb->mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(phba->host->host_lock);
	if (la->pb)
		phba->fc_flag |= FC_BYPASSED_MODE;
	else
		phba->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(phba->host->host_lock);

	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP) {
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
		}
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1303 Link Up Event x%x received "
				"Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				phba->hba_state, phba->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *ndlp_fdmi;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		mempool_free(ndlp, phba->nlp_mem_pool);

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);

	if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
		/* This NPort has been assigned an NPort_ID by the fabric as a
		 * result of the completed fabric login.  Issue a State Change
		 * Registration (SCR) ELS request to the fabric controller
		 * (SCR_DID) so that this NPort gets RSCN events from the
		 * fabric.
		 */
		lpfc_issue_els_scr(phba, SCR_DID, 0);

		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
		if (!ndlp) {
			/* Allocate a new node instance. If the pool is empty,
			 * start the discovery process and skip the Nameserver
			 * login process.  This is attempted again later on.
			 * Otherwise, issue a Port Login (PLOGI) to NameServer.
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
			if (!ndlp) {
				lpfc_disc_start(phba);
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
				mempool_free(pmb, phba->mbox_mem_pool);
				return;
			} else {
				lpfc_nlp_init(phba, ndlp, NameServer_DID);
				ndlp->nlp_type |= NLP_FABRIC;
			}
		}
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
		lpfc_issue_els_plogi(phba, NameServer_DID, 0);
		if (phba->cfg_fdmi_on) {
			ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
						  GFP_KERNEL);
			if (ndlp_fdmi) {
				lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
				ndlp_fdmi->nlp_type |= NLP_FABRIC;
				ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
				lpfc_issue_els_plogi(phba, FDMI_DID, 0);
			}
		}
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_drop_node(phba, ndlp);

		/* RegLogin failed, so just use loop map to make discovery
		   list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);

	if (phba->hba_state < LPFC_HBA_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
	}

	phba->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		lpfc_disc_start(phba);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

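/*
 * Register (or re-register) a node with the FC transport as a remote
 * port, initialize the static rport data, and record the SCSI target
 * id assigned by the transport when it is within our supported range.
 */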
static void
lpfc_register_remote_port(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
	if (!rport) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = ndlp;

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;


	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}

	return;
}

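/*
 * Remove a node's remote port from the FC transport; if the rport never
 * received a SCSI target id, break the rport/node linkage first.
 */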
static void
lpfc_unregister_remote_port(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_rport_data *rdata = rport->dd_data;

	if (rport->scsi_target_id == -1) {
		ndlp->rport = NULL;
		rdata->pnode = NULL;
	}

	fc_remote_port_delete(rport);

	return;
}

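/*
 * Adjust the per-state node counters by <count> for the given
 * discovery state.
 */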
static void
lpfc_nlp_counters(struct lpfc_hba *phba, int state, int count)
{
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		phba->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		phba->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		phba->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		phba->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		phba->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		phba->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		phba->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		phba->fc_npr_cnt += count;
		break;
	}
}

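/*
 * Unlink a node from whatever node list it is currently on and clear
 * its list flag bits.
 */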
static void
lpfc_delink_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	switch (ndlp->nlp_flag & NLP_LIST_MASK) {
	case NLP_UNUSED_LIST:
		list_del_init(&ndlp->nlp_listp);
		break;
	case NLP_PLOGI_LIST:
		list_del_init(&ndlp->nlp_listp);
		break;
	case NLP_ADISC_LIST:
		list_del_init(&ndlp->nlp_listp);
		break;
	case NLP_REGLOGIN_LIST:
		list_del_init(&ndlp->nlp_listp);
		break;
	case NLP_PRLI_LIST:
		list_del_init(&ndlp->nlp_listp);
		break;
	case NLP_UNMAPPED_LIST:
		list_del_init(&ndlp->nlp_listp);
		break;
	case NLP_MAPPED_LIST:
		list_del_init(&ndlp->nlp_listp);
		break;
	case NLP_NPR_LIST:
		list_del_init(&ndlp->nlp_listp);
		break;
	}

	ndlp->nlp_flag &= ~NLP_LIST_MASK;
}

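/*
 * Move a node onto the list identified by <list>: delink it from its
 * current list under the host lock, then add it to the tail of the
 * requested list.
 */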
static int
lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
{
	struct lpfc_sli *psli;

	psli = &phba->sli;
	/* Sanity check to ensure we are not moving to / from the same list */
	if ((nlp->nlp_flag & NLP_LIST_MASK) == list)
		return 0;

	spin_lock_irq(phba->host->host_lock);
	lpfc_delink_node(phba, nlp);

	/* Add NPort <did> to <num> list */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_NODE,
			"%d:0904 Add NPort x%x to %d list Data: x%x\n",
			phba->brd_no,
			nlp->nlp_DID, list, nlp->nlp_flag);

	switch (list) {
	case NLP_UNUSED_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the unused list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
		break;
	case NLP_PLOGI_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the plogi list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
		break;
	case NLP_ADISC_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the adisc list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
		break;
	case NLP_REGLOGIN_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the reglogin list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
		break;
	case NLP_PRLI_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the prli list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
		break;
	case NLP_UNMAPPED_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the unmap list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
		break;
	case NLP_MAPPED_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the map list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
		break;
	case NLP_NPR_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the npr list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
		break;
	}

	spin_unlock_irq(phba->host->host_lock);
	return 0;
}

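/*
 * Reconcile the FC transport with a node state change: unregister the
 * remote port when leaving the mapped/unmapped states, register it when
 * entering them, and push nodes without a usable SCSI target id back to
 * the unmapped state.
 */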
static void
lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		phba->nport_event_cnt++;
		lpfc_unregister_remote_port(phba, ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		if (!ndlp->rport ||
		    ndlp->rport->port_state == FC_PORTSTATE_BLOCKED)
			lpfc_register_remote_port(phba, ndlp);
	}

	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(phba->host->host_lock);
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}

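/*
 * Transition a node to a new discovery state: cancel a pending retry
 * delay where required, fix up the per-state counters, move the node
 * to the list that corresponds to the new state, and let
 * lpfc_nlp_state_cleanup() update the transport.
 */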
void
lpfc_nlp_set_state(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int state)
{
	int old_state = ndlp->nlp_state;
	static int list_id[] = {
		[NLP_STE_UNUSED_NODE] = NLP_UNUSED_LIST,
		[NLP_STE_PLOGI_ISSUE] = NLP_PLOGI_LIST,
		[NLP_STE_ADISC_ISSUE] = NLP_ADISC_LIST,
		[NLP_STE_REG_LOGIN_ISSUE] = NLP_REGLOGIN_LIST,
		[NLP_STE_PRLI_ISSUE] = NLP_PRLI_LIST,
		[NLP_STE_UNMAPPED_NODE] = NLP_UNMAPPED_LIST,
		[NLP_STE_MAPPED_NODE] = NLP_MAPPED_LIST,
		[NLP_STE_NPR_NODE] = NLP_NPR_LIST,
	};

	if (old_state == NLP_STE_NPR_NODE &&
	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (old_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(phba, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_list(phba, ndlp, list_id[state]);
	lpfc_nlp_counters(phba, state, 1);

	lpfc_nlp_state_cleanup(phba, ndlp, old_state, state);
}

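/*
 * Take a node off its list and counters without freeing it, cleaning
 * up transport state via lpfc_nlp_state_cleanup().
 */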
void
lpfc_dequeue_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	spin_lock_irq(phba->host->host_lock);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
	lpfc_delink_node(phba, ndlp);
	spin_unlock_irq(phba->host->host_lock);
	lpfc_nlp_state_cleanup(phba, ndlp, ndlp->nlp_state, 0);
}

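/*
 * Delink a node and release it entirely via lpfc_nlp_remove().
 */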
void
lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	spin_lock_irq(phba->host->host_lock);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
	lpfc_delink_node(phba, ndlp);
	spin_unlock_irq(phba->host->host_lock);
	lpfc_nlp_remove(phba, ndlp);
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_hba * phba)
{
	uint32_t tmo;

	if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be greater than the
		 * ELS/CT timeout; FC spec states we need 3 * ratov for
		 * CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0247 Start Discovery Timer state x%x "
			"Data: x%x x%lx x%x x%x\n",
			phba->brd_no,
			phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_hba * phba)
{
	/* Turn off discovery timer if it's running */
	if (phba->fc_flag & FC_DISC_TMO) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&phba->fc_disctmo);
		phba->work_hba_events &= ~WORKER_DISC_TMO;
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0248 Cancel Discovery Timer state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return 0;
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
static int
lpfc_check_sli_ndlp(struct lpfc_hba * phba,
		    struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;

	psli = &phba->sli;
	icmd = &iocb->iocb;
	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(phba->host->host_lock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp
				     (phba, pring, iocb, ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(phba->host->host_lock);

		}
	}

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del(&iocb->list);

		if (iocb->iocb_cmpl) {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		} else
			lpfc_sli_release_iocbq(phba, iocb);
	}

	return 0;
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (ndlp->nlp_rpi) {
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox
				(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		return 1;
	}
	return 0;
}

/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t       *mb;
	LPFC_MBOXQ_t       *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0900 Cleanup node for NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
			ndlp->nlp_state, ndlp->nlp_rpi);

	lpfc_dequeue_node(phba, ndlp);

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_els_abort(phba, ndlp);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(phba->host->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
		list_del_init(&ndlp->els_retry_evt.evt_listp);

	lpfc_unreg_rpi(phba, ndlp);

	return 0;
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
int
lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_rport_data *rdata;

	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	}

	if (ndlp->nlp_disc_refcnt) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_REMOVE;
		spin_unlock_irq(phba->host->host_lock);
	} else {
		lpfc_freenode(phba, ndlp);

		if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
			rdata = ndlp->rport->dd_data;
			rdata->pnode = NULL;
			ndlp->rport = NULL;
		}

		mempool_free(ndlp, phba->nlp_mem_pool);
	}
	return 0;
}

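/*
 * Match a node against a DID, allowing for the area/domain wildcard
 * addressing used on private loops (area and domain of zero).
 */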
static int
lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
{
	D_ID mydid;
	D_ID ndlpdid;
	D_ID matchdid;

	if (did == Bcast_DID)
		return 0;

	if (ndlp->nlp_DID == 0) {
		return 0;
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = phba->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}

/* Search for a nodelist entry on a specific list */
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	struct list_head *lists[] = {&phba->fc_nlpunmap_list,
				     &phba->fc_nlpmap_list,
				     &phba->fc_plogi_list,
				     &phba->fc_adisc_list,
				     &phba->fc_reglogin_list,
				     &phba->fc_prli_list,
				     &phba->fc_npr_list,
				     &phba->fc_unused_list};
	uint32_t search[] = {NLP_SEARCH_UNMAPPED,
			     NLP_SEARCH_MAPPED,
			     NLP_SEARCH_PLOGI,
			     NLP_SEARCH_ADISC,
			     NLP_SEARCH_REGLOGIN,
			     NLP_SEARCH_PRLI,
			     NLP_SEARCH_NPR,
			     NLP_SEARCH_UNUSED};
	int i;
	uint32_t data1;

	spin_lock_irq(phba->host->host_lock);
	for (i = 0; i < ARRAY_SIZE(lists); i++) {
		if (!(order & search[i]))
			continue;
		list_for_each_entry(ndlp, lists[i], nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0929 FIND node DID "
						" Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}
	spin_unlock_irq(phba->host->host_lock);

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
			phba->brd_no, did, order);
	return NULL;
}

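/*
 * Find or allocate a node for the given DID and mark it for discovery.
 * During RSCN processing only DIDs that match the RSCN payload are
 * considered; nodes already undergoing ADISC/PLOGI are left alone.
 */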
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t flg;

	ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
	if (!ndlp) {
		if ((phba->fc_flag & FC_RSCN_MODE) &&
		    ((lpfc_rscn_payload_check(phba, did) == 0)))
			return NULL;
		ndlp = (struct lpfc_nodelist *)
			mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(phba, ndlp, did);
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		return ndlp;
	}
	if (phba->fc_flag & FC_RSCN_MODE) {
		if (lpfc_rscn_payload_check(phba, did)) {
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			if (ndlp->nlp_flag & NLP_DELAY_TMO)
				lpfc_cancel_retry_delay_tmo(phba, ndlp);
		} else
			ndlp = NULL;
	} else {
		flg = ndlp->nlp_flag & NLP_LIST_MASK;
		if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
			return NULL;
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	}
	return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_hba * phba)
{
	int j;
	uint32_t alpa, index;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->fc_topology != TOPOLOGY_LOOP) {
		return;
	}

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];

			if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
				continue;
			}
			lpfc_setup_disc_node(phba, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (phba->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((phba->fc_myDID & 0xff) == alpa) {
				continue;
			}

			lpfc_setup_disc_node(phba, alpa);
		}
	}
	return;
}

/* Start Link up / RSCN discovery on NPR list */
void
lpfc_disc_start(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	uint32_t did_changed, num_sent;
	uint32_t clear_la_pending;
	int rc;

	psli = &phba->sli;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->hba_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (phba->hba_state < LPFC_HBA_READY) {
		phba->hba_state = LPFC_DISC_AUTH;
	}
	lpfc_set_disctmo(phba);

	if (phba->fc_prevDID == phba->fc_myDID) {
		did_changed = 0;
	} else {
		did_changed = 1;
	}
	phba->fc_prevDID = phba->fc_myDID;
	phba->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0202 Start Discovery hba state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	/* If our did changed, we MUST do PLOGI */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				 nlp_listp) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			if (did_changed) {
				spin_lock_irq(phba->host->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(phba->host->host_lock);
			}
		}
	}

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(phba);

	if (num_sent)
		return;

	if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
		/* If we get here, there is nothing to ADISC */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			phba->hba_state = LPFC_CLEAR_LA;
			lpfc_clear_la(phba, mbox);
			mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
			rc = lpfc_sli_issue_mbox(phba, mbox,
						 (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				lpfc_disc_flush_list(phba);
				psli->ring[(psli->extra_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->fcp_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->next_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				phba->hba_state = LPFC_HBA_READY;
			}
		}
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(phba);

		if (num_sent)
			return;

		if (phba->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((phba->fc_rscn_id_cnt == 0) &&
			    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(phba->host->host_lock);
				phba->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(phba->host->host_lock);
			} else
				lpfc_els_handle_rscn(phba);
		}
	}
	return;
}

/*
 *  Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 *  ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(phba->host->host_lock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del(&iocb->list);

		if (iocb->iocb_cmpl) {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		} else
			lpfc_sli_release_iocbq(phba, iocb);
	}
}

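/*
 * Flush discovery in progress: fail and remove every node still on the
 * PLOGI or ADISC lists.
 */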
void
lpfc_disc_flush_list(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	if (phba->fc_plogi_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
					 nlp_listp) {
			lpfc_free_tx(phba, ndlp);
			lpfc_nlp_remove(phba, ndlp);
		}
	}
	if (phba->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
					 nlp_listp) {
			lpfc_free_tx(phba, ndlp);
			lpfc_nlp_remove(phba, ndlp);
		}
	}
	return;
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(phba->host->host_lock, flags);
	if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
		phba->work_hba_events |= WORKER_DISC_TMO;
		if (phba->work_wait)
			wake_up(phba->work_wait);
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);
	return;
}

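/*
 * Worker-thread handler for the discovery timeout. The recovery action
 * depends on the hba_state in which the timeout fired: FAN, FLOGI,
 * NameServer login/query, node authentication, CLEAR_LA, and RSCN each
 * get their own fallback path.
 */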
static void
lpfc_disc_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
	int rc, clrlaerr = 0;

	if (unlikely(!phba))
		return;

	if (!(phba->fc_flag & FC_DISC_TMO))
		return;

	psli = &phba->sli;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	switch (phba->hba_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
		/* FAN timeout */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_DISCOVERY,
				"%d:0221 FAN timeout\n",
				phba->brd_no);

		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
					 nlp_listp) {
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(phba, ndlp);
			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(phba, ndlp);
			}
		}
		phba->hba_state = LPFC_FLOGI;
		lpfc_set_disctmo(phba);
		lpfc_initial_flogi(phba);
		break;

	case LPFC_FLOGI:
	/* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0222 Initial FLOGI timeout\n",
				phba->brd_no);

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0223 Timeout while waiting for NameServer "
				"login\n", phba->brd_no);

		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
		if (ndlp)
			lpfc_nlp_remove(phba, ndlp);
		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0224 NameServer Query timeout "
				"Data: x%x x%x\n",
				phba->brd_no,
				phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
					 NameServer_DID);
		if (ndlp) {
			if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
				/* Try it one more time */
				rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
				if (rc == 0)
					break;
			}
			phba->fc_ns_retry = 0;
		}

		/* Nothing to authenticate, so CLEAR_LA right now */
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
			break;
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0206 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0227 Node Authentication timeout\n",
				phba->brd_no);
		lpfc_disc_flush_list(phba);
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0207 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}
		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
		}
		break;

	case LPFC_CLEAR_LA:
	/* CLEAR LA timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0228 CLEAR LA timeout\n",
				phba->brd_no);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		if (phba->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_DISCOVERY,
					"%d:0231 RSCN timeout Data: x%x x%x\n",
					phba->brd_no,
					phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(phba);

			lpfc_els_flush_rscn(phba);
			lpfc_disc_flush_list(phba);
		}
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}

	return;
}

/*
 * This routine handles processing a FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);

	/* Start issuing Fabric-Device Management Interface (FDMI)
	 * command to 0xfffffa (FDMI well known port)
	 */
	if (phba->cfg_fdmi_on == 1) {
		lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
	} else {
		/*
		 * Delay issuing FDMI command if fdmi-on=2
		 * (supporting RPA/hostname)
		 */
		mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/*
 * This routine looks up the ndlp lists
 * for the given RPI. If the rpi is found,
 * it returns the node list pointer,
 * else it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
{
	struct lpfc_nodelist *ndlp;
	struct list_head *lists[] = {&phba->fc_nlpunmap_list,
				     &phba->fc_nlpmap_list,
				     &phba->fc_plogi_list,
				     &phba->fc_adisc_list,
				     &phba->fc_reglogin_list};
	int i;

	for (i = 0; i < ARRAY_SIZE(lists); i++)
		list_for_each_entry(ndlp, lists[i], nlp_listp)
			if (ndlp->nlp_rpi == rpi) {
				return ndlp;
			}
	return NULL;
}

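/*
 * Locked wrapper around __lpfc_findnode_rpi().
 */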
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
{
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(phba->host->host_lock);
	ndlp = __lpfc_findnode_rpi(phba, rpi);
	spin_unlock_irq(phba->host->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists
 * for the given WWPN. If the WWPN is found,
 * it returns the node list pointer,
 * else it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
		   struct lpfc_name * wwpn)
{
	struct lpfc_nodelist *ndlp;
	struct list_head *lists[] = {&phba->fc_nlpunmap_list,
				     &phba->fc_nlpmap_list,
				     &phba->fc_npr_list,
				     &phba->fc_plogi_list,
				     &phba->fc_adisc_list,
				     &phba->fc_reglogin_list,
				     &phba->fc_prli_list};
	uint32_t search[] = {NLP_SEARCH_UNMAPPED,
			     NLP_SEARCH_MAPPED,
			     NLP_SEARCH_NPR,
			     NLP_SEARCH_PLOGI,
			     NLP_SEARCH_ADISC,
			     NLP_SEARCH_REGLOGIN,
			     NLP_SEARCH_PRLI};
	int i;

	spin_lock_irq(phba->host->host_lock);
	for (i = 0; i < ARRAY_SIZE(lists); i++) {
		if (!(order & search[i]))
			continue;
		list_for_each_entry(ndlp, lists[i], nlp_listp) {
			if (memcmp(&ndlp->nlp_portname, wwpn,
				   sizeof(struct lpfc_name)) == 0) {
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}
	spin_unlock_irq(phba->host->host_lock);
	return NULL;
}

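/*
 * Initialize a freshly allocated nodelist entry: zero it, set up the
 * ELS retry delay timer, and record the DID and owning HBA.
 */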
void
lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->nlp_phba = phba;
	ndlp->nlp_sid = NLP_NO_SID;
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	return;
}