/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
        0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
        0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
        0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
        0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
        0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
        0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
        0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
        0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
        0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
        0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
        0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
        0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
        0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
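/*
 * Note: the table above lists the 126 valid arbitrated-loop physical
 * addresses (AL_PAs), ordered from 0xEF down to 0x01.  When the firmware
 * returns no positional loop map, lpfc_disc_list_loopmap() below walks
 * this table (forwards or backwards depending on cfg_scan_down) to probe
 * every possible loop address.
 */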
static void lpfc_disc_timeout_handler(struct lpfc_vport *);

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist * ndlp;
        struct lpfc_hba *phba;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;

        if (!ndlp) {
                if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
                        printk(KERN_ERR "Cannot find remote node"
                               " to terminate I/O Data x%x\n",
                               rport->port_id);
                return;
        }

        phba = ndlp->vport->phba;

        if (ndlp->nlp_sid != NLP_NO_SID) {
                lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                                    ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
        }

        return;
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist * ndlp;
        uint8_t *name;
        int warn_on = 0;
        struct lpfc_hba *phba;
        struct lpfc_vport *vport;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;

        if (!ndlp) {
                if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
                        printk(KERN_ERR "Cannot find remote node"
                               " for rport in dev_loss_tmo_callbk x%x\n",
                               rport->port_id);
                return;
        }

        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
                return;

        name = (uint8_t *)&ndlp->nlp_portname;
        vport = ndlp->vport;
        phba  = vport->phba;

        if (ndlp->nlp_sid != NLP_NO_SID) {
                warn_on = 1;
                /* flush the target */
                lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                                    ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
        }
        if (vport->load_flag & FC_UNLOADING)
                warn_on = 0;

        if (warn_on) {
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                                "%d:0203 Devloss timeout on "
                                "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                                "NPort x%x Data: x%x x%x x%x\n",
                                phba->brd_no,
                                *name, *(name+1), *(name+2), *(name+3),
                                *(name+4), *(name+5), *(name+6), *(name+7),
                                ndlp->nlp_DID, ndlp->nlp_flag,
                                ndlp->nlp_state, ndlp->nlp_rpi);
        } else {
                lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
                                "%d:0204 Devloss timeout on "
                                "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                                "NPort x%x Data: x%x x%x x%x\n",
                                phba->brd_no,
                                *name, *(name+1), *(name+2), *(name+3),
                                *(name+4), *(name+5), *(name+6), *(name+7),
                                ndlp->nlp_DID, ndlp->nlp_flag,
                                ndlp->nlp_state, ndlp->nlp_rpi);
        }

        if (!(vport->load_flag & FC_UNLOADING) &&
            !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
            !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
        else {
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                lpfc_nlp_put(ndlp);
                put_device(&rport->dev);
        }

        return;
}

static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
        struct lpfc_work_evt  *evtp = NULL;
        struct lpfc_nodelist  *ndlp;
        int free_evt;

        spin_lock_irq(&phba->hbalock);
        while (!list_empty(&phba->work_list)) {
                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                                 evt_listp);
                spin_unlock_irq(&phba->hbalock);
                free_evt = 1;
                switch (evtp->evt) {
                case LPFC_EVT_ELS_RETRY:
                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
                        lpfc_els_retry_delay_handler(ndlp);
                        free_evt = 0;
                        break;
                case LPFC_EVT_ONLINE:
                        if (phba->link_state < LPFC_LINK_DOWN)
                                *(int *) (evtp->evt_arg1) = lpfc_online(phba);
                        else
                                *(int *) (evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE_PREP:
                        if (phba->link_state >= LPFC_LINK_DOWN)
                                lpfc_offline_prep(phba);
                        *(int *)(evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE:
                        lpfc_offline(phba);
                        lpfc_sli_brdrestart(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_WARM_START:
                        lpfc_offline(phba);
                        lpfc_reset_barrier(phba);
                        lpfc_sli_brdreset(phba);
                        lpfc_hba_down_post(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_KILL:
                        lpfc_offline(phba);
                        *(int *)(evtp->evt_arg1)
                                = (phba->pport->stopped)
                                ? 0 : lpfc_sli_brdkill(phba);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                }
                if (free_evt)
                        kfree(evtp);
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);
}

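/*
 * Note on event ownership above: every event except LPFC_EVT_ELS_RETRY is
 * allocated by lpfc_workq_post_event() and freed here after its completion
 * has been signalled (free_evt stays set).  The ELS retry event is embedded
 * in the ndlp itself, so free_evt is cleared and the structure left alone.
 */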
static void
lpfc_work_done(struct lpfc_hba *phba)
{
        struct lpfc_sli_ring *pring;
        int i;
        uint32_t ha_copy, control, work_port_events;
        struct lpfc_vport *vport;

        spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);

        if (ha_copy & HA_ERATT)
                lpfc_handle_eratt(phba);

        if (ha_copy & HA_MBATT)
                lpfc_sli_handle_mb_event(phba);

        if (ha_copy & HA_LATT)
                lpfc_handle_latt(phba);

        vport = phba->pport;

        work_port_events = vport->work_port_events;

        if (work_port_events & WORKER_DISC_TMO)
                lpfc_disc_timeout_handler(vport);

        if (work_port_events & WORKER_ELS_TMO)
                lpfc_els_timeout_handler(vport);

        if (work_port_events & WORKER_MBOX_TMO)
                lpfc_mbox_timeout_handler(phba);

        if (work_port_events & WORKER_FDMI_TMO)
                lpfc_fdmi_timeout_handler(vport);

        spin_lock_irq(&phba->hbalock);
        vport->work_port_events &= ~work_port_events;
        spin_unlock_irq(&phba->hbalock);

        for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
                pring = &phba->sli.ring[i];
                if ((ha_copy & HA_RXATT)
                    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
                        if (pring->flag & LPFC_STOP_IOCB_MASK) {
                                pring->flag |= LPFC_DEFERRED_RING_EVENT;
                        } else {
                                lpfc_sli_handle_slow_ring_event(phba, pring,
                                                                (ha_copy &
                                                                 HA_RXMASK));
                                pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                        }
                        /*
                         * Turn on Ring interrupts
                         */
                        spin_lock_irq(&phba->hbalock);
                        control = readl(phba->HCregaddr);
                        control |= (HC_R0INT_ENA << i);
                        writel(control, phba->HCregaddr);
                        readl(phba->HCregaddr); /* flush */
                        spin_unlock_irq(&phba->hbalock);
                }
        }

        lpfc_work_list_done(phba);
}

static int
check_work_wait_done(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        int rc = 0;

        if (!vport)
                return 0;

        spin_lock_irq(&phba->hbalock);

        if (phba->work_ha ||
            vport->work_port_events ||
            (!list_empty(&phba->work_list)) ||
            kthread_should_stop())
                rc = 1;

        spin_unlock_irq(&phba->hbalock);
        return rc;
}

int
lpfc_do_work(void *p)
{
        struct lpfc_hba *phba = p;
        int rc;
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

        set_user_nice(current, -20);
        phba->work_wait = &work_waitq;

        while (1) {

                rc = wait_event_interruptible(work_waitq,
                                              check_work_wait_done(phba));

                if (kthread_should_stop())
                        break;

                lpfc_work_done(phba);
        }

        phba->work_wait = NULL;
        return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
                      uint32_t evt)
{
        struct lpfc_work_evt *evtp;

        /*
         * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
         * be queued to worker thread for processing
         */
        evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
        if (!evtp)
                return 0;

        evtp->evt_arg1  = arg1;
        evtp->evt_arg2  = arg2;
        evtp->evt       = evt;

        spin_lock_irq(&phba->hbalock);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        if (phba->work_wait)
                wake_up(phba->work_wait);
        spin_unlock_irq(&phba->hbalock);

        return 1;
}

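/*
 * Typical caller pattern (a sketch -- the real callers live in other lpfc
 * files and may differ in detail): stack a status word and a completion,
 * post the event, then sleep until lpfc_work_list_done() runs the handler
 * and signals the completion:
 *
 *        struct completion online_compl;
 *        int status = 0;
 *
 *        init_completion(&online_compl);
 *        lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE);
 *        wait_for_completion(&online_compl);
 */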
int
lpfc_linkdown(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_sli   *psli;
        struct lpfc_nodelist  *ndlp, *next_ndlp;
        LPFC_MBOXQ_t          *mb;
        int rc;

        psli = &phba->sli;
        if (phba->link_state == LPFC_LINK_DOWN) {
                return 0;
        }
        spin_lock_irq(&phba->hbalock);
        if (phba->link_state > LPFC_LINK_DOWN)
                phba->link_state = LPFC_LINK_DOWN;
        spin_unlock_irq(&phba->hbalock);

        fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

        /* Clean up any firmware default rpi's */
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
                lpfc_unreg_did(phba, 0xffffffff, mb);
                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
                    == MBX_NOT_FINISHED) {
                        mempool_free(mb, phba->mbox_mem_pool);
                }
        }

        /* Cleanup any outstanding RSCN activity */
        lpfc_els_flush_rscn(vport);

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_cmd(vport);

        /*
         * Issue a LINK DOWN event to all nodes.
         */
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                /* free any ndlp's on unused state */
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        lpfc_drop_node(vport, ndlp);
                else            /* otherwise, force node recovery. */
                        rc = lpfc_disc_state_machine(vport, ndlp, NULL,
                                                     NLP_EVT_DEVICE_RECOVERY);
        }

        /* Setup myDID for link up if we are in pt2pt mode */
        if (vport->fc_flag & FC_PT2PT) {
                vport->fc_myDID = 0;
                mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mb) {
                        lpfc_config_link(phba, mb);
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        if (lpfc_sli_issue_mbox(phba, mb,
                                                (MBX_NOWAIT | MBX_STOP_IOCB))
                            == MBX_NOT_FINISHED) {
                                mempool_free(mb, phba->mbox_mem_pool);
                        }
                }
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
                spin_unlock_irq(shost->host_lock);
        }

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_LBIT;
        spin_unlock_irq(shost->host_lock);

        /* Turn off discovery timer if it's running */
        lpfc_can_disctmo(vport);

        /* Must process IOCBs on all rings to handle ABORTed I/Os */
        return 0;
}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp, *next_ndlp;

        fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

        spin_lock_irq(shost->host_lock);
        phba->link_state = LPFC_LINK_UP;
        vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
                            FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
        vport->fc_flag |= FC_NDISC_ACTIVE;
        vport->fc_ns_retry = 0;
        spin_unlock_irq(shost->host_lock);

        if (vport->fc_flag & FC_LBIT) {
                list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                        if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) {
                                if (ndlp->nlp_type & NLP_FABRIC) {
                                        /*
                                         * On Linkup it's safe to clean up the
                                         * ndlp from Fabric connections.
                                         */
                                        lpfc_nlp_set_state(vport, ndlp,
                                                           NLP_STE_UNUSED_NODE);
                                } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                                        /*
                                         * Fail outstanding IO now since
                                         * device is marked for PLOGI.
                                         */
                                        lpfc_unreg_rpi(vport, ndlp);
                                }
                        }
                }
        }

        /* free any ndlp's in unused state */
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
                                 nlp_listp) {
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        lpfc_drop_node(vport, ndlp);
        }

        return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_sli   *psli = &phba->sli;
        MAILBOX_t *mb = &pmb->mb;
        uint32_t control;

        /* Since we don't do discovery right now, turn these off here */
        psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

        /* Check for error */
        if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
                /* CLEAR_LA mbox error <mbxStatus> state <port_state> */
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                "%d:0320 CLEAR_LA mbxStatus error x%x hba "
                                "state x%x\n",
                                phba->brd_no, mb->mbxStatus, vport->port_state);

                phba->link_state = LPFC_HBA_ERROR;
                goto out;
        }

        if (vport->fc_flag & FC_ABORT_DISCOVERY)
                goto out;

        vport->num_disc_nodes = 0;
        /* go thru NPR nodes and issue ELS PLOGIs */
        if (vport->fc_npr_cnt)
                lpfc_els_disc_plogi(vport);

        if (!vport->num_disc_nodes) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~FC_NDISC_ACTIVE;
                spin_unlock_irq(shost->host_lock);
        }

        printk(KERN_ERR "%s (%d): vport ready\n",
               __FUNCTION__, __LINE__);
        vport->port_state = LPFC_VPORT_READY;

out:
        /* Device Discovery completes */
        lpfc_printf_log(phba,
                        KERN_INFO,
                        LOG_DISCOVERY,
                        "%d:0225 Device Discovery completes\n",
                        phba->brd_no);

        mempool_free(pmb, phba->mbox_mem_pool);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
        spin_unlock_irq(shost->host_lock);

        del_timer_sync(&phba->fc_estabtmo);

        lpfc_can_disctmo(vport);

        /* turn on Link Attention interrupts */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        return;
}

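/*
 * Note: CLEAR_LA completion is also where Link Attention handling is
 * re-armed -- setting LPFC_PROCESS_LA and HC_LAINT_ENA above lets the
 * interrupt path accept the next link attention event, which is masked
 * while a CLEAR_LA is outstanding.
 */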
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct lpfc_sli   *psli = &phba->sli;
        int rc;

        if (pmb->mb.mbxStatus)
                goto out;

        mempool_free(pmb, phba->mbox_mem_pool);

        if (phba->fc_topology == TOPOLOGY_LOOP &&
            vport->fc_flag & FC_PUBLIC_LOOP &&
            !(vport->fc_flag & FC_LBIT)) {
                /* Need to wait for FAN - use discovery timer
                 * for timeout.  port_state is identically
                 * LPFC_LOCAL_CFG_LINK while waiting for FAN
                 */
                lpfc_set_disctmo(vport);
                return;
        }

        /* Start discovery by sending a FLOGI. port_state is identically
         * LPFC_FLOGI while waiting for FLOGI cmpl
         */
        vport->port_state = LPFC_FLOGI;
        lpfc_set_disctmo(vport);
        lpfc_initial_flogi(vport);
        return;

out:
        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                        "%d:0306 CONFIG_LINK mbxStatus error x%x "
                        "HBA state x%x\n",
                        phba->brd_no, pmb->mb.mbxStatus, vport->port_state);

        lpfc_linkdown(phba);

        phba->link_state = LPFC_HBA_ERROR;

        lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                        "%d:0200 CONFIG_LINK bad hba state x%x\n",
                        phba->brd_no, vport->port_state);

        lpfc_clear_la(phba, pmb);
        printk(KERN_ERR "%s (%d): do clear_la\n",
               __FUNCTION__, __LINE__);
        pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;

        rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
        if (rc == MBX_NOT_FINISHED) {
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_disc_flush_list(vport);
                psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                printk(KERN_ERR "%s (%d): vport ready\n",
                       __FUNCTION__, __LINE__);
                vport->port_state = LPFC_VPORT_READY;
        }
        return;
}

static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_sli *psli = &phba->sli;
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
        struct lpfc_vport  *vport = pmb->vport;

        /* Check for error */
        if (mb->mbxStatus) {
                /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                "%d:0319 READ_SPARAM mbxStatus error x%x "
                                "hba state x%x\n",
                                phba->brd_no, mb->mbxStatus, vport->port_state);

                lpfc_linkdown(phba);
                phba->link_state = LPFC_HBA_ERROR;
                goto out;
        }

        memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
               sizeof (struct serv_parm));
        if (phba->cfg_soft_wwnn)
                u64_to_wwn(phba->cfg_soft_wwnn,
                           vport->fc_sparam.nodeName.u.wwn);
        if (phba->cfg_soft_wwpn)
                u64_to_wwn(phba->cfg_soft_wwpn,
                           vport->fc_sparam.portName.u.wwn);
        memcpy((uint8_t *) &vport->fc_nodename,
               (uint8_t *) &vport->fc_sparam.nodeName,
               sizeof (struct lpfc_name));
        memcpy((uint8_t *) &vport->fc_portname,
               (uint8_t *) &vport->fc_sparam.portName,
               sizeof (struct lpfc_name));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;

out:
        pmb->context1 = NULL;
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        if (phba->link_state != LPFC_CLEAR_LA) {
                struct lpfc_sli_ring *extra_ring =
                        &psli->ring[psli->extra_ring];
                struct lpfc_sli_ring *fcp_ring  = &psli->ring[psli->fcp_ring];
                struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];

                lpfc_clear_la(phba, pmb);
                printk(KERN_ERR "%s (%d): do clear_la\n",
                       __FUNCTION__, __LINE__);
                pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;

                if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
                    == MBX_NOT_FINISHED) {
                        mempool_free(pmb, phba->mbox_mem_pool);
                        lpfc_disc_flush_list(vport);
                        extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        printk(KERN_ERR "%s (%d): vport ready\n",
                               __FUNCTION__, __LINE__);
                        vport->port_state = LPFC_VPORT_READY;
                }
        } else {
                mempool_free(pmb, phba->mbox_mem_pool);
        }
        return;
}

static void
lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
        int i;
        struct lpfc_dmabuf *mp;
        int rc;

        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

        spin_lock_irq(shost->host_lock);
        switch (la->UlnkSpeed) {
        case LA_1GHZ_LINK:
                phba->fc_linkspeed = LA_1GHZ_LINK;
                break;
        case LA_2GHZ_LINK:
                phba->fc_linkspeed = LA_2GHZ_LINK;
                break;
        case LA_4GHZ_LINK:
                phba->fc_linkspeed = LA_4GHZ_LINK;
                break;
        case LA_8GHZ_LINK:
                phba->fc_linkspeed = LA_8GHZ_LINK;
                break;
        default:
                phba->fc_linkspeed = LA_UNKNW_LINK;
                break;
        }

        phba->fc_topology = la->topology;

        if (phba->fc_topology == TOPOLOGY_LOOP) {
                /* Get Loop Map information */

                if (la->il)
                        vport->fc_flag |= FC_LBIT;

                vport->fc_myDID = la->granted_AL_PA;
                i = la->un.lilpBde64.tus.f.bdeSize;

                if (i == 0) {
                        phba->alpa_map[0] = 0;
                } else {
                        if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
                                int numalpa, j, k;
                                union {
                                        uint8_t pamap[16];
                                        struct {
                                                uint32_t wd1;
                                                uint32_t wd2;
                                                uint32_t wd3;
                                                uint32_t wd4;
                                        } pa;
                                } un;
                                numalpa = phba->alpa_map[0];
                                j = 0;
                                while (j < numalpa) {
                                        memset(un.pamap, 0, 16);
                                        for (k = 1; j < numalpa; k++) {
                                                un.pamap[k - 1] =
                                                        phba->alpa_map[j + 1];
                                                j++;
                                                if (k == 16)
                                                        break;
                                        }
                                        /* Link Up Event ALPA map */
                                        lpfc_printf_log(phba,
                                                        KERN_WARNING,
                                                        LOG_LINK_EVENT,
                                                        "%d:1304 Link Up Event "
                                                        "ALPA map Data: x%x "
                                                        "x%x x%x x%x\n",
                                                        phba->brd_no,
                                                        un.pa.wd1, un.pa.wd2,
                                                        un.pa.wd3, un.pa.wd4);
                                }
                        }
                }
        } else {
                vport->fc_myDID = phba->fc_pref_DID;
                vport->fc_flag |= FC_LBIT;
        }
        spin_unlock_irq(shost->host_lock);

        lpfc_linkup(phba);
        if (sparam_mbox) {
                lpfc_read_sparam(phba, sparam_mbox);
                sparam_mbox->vport = vport;
                sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
                rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
                                         (MBX_NOWAIT | MBX_STOP_IOCB));
                if (rc == MBX_NOT_FINISHED) {
                        mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                        mempool_free(sparam_mbox, phba->mbox_mem_pool);
                        if (cfglink_mbox)
                                mempool_free(cfglink_mbox, phba->mbox_mem_pool);
                        return;
                }
        }

        if (cfglink_mbox) {
                vport->port_state = LPFC_LOCAL_CFG_LINK;
                lpfc_config_link(phba, cfglink_mbox);
                cfglink_mbox->vport = vport;
                cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
                rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
                                         (MBX_NOWAIT | MBX_STOP_IOCB));
                if (rc == MBX_NOT_FINISHED)
                        mempool_free(cfglink_mbox, phba->mbox_mem_pool);
        }
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
        uint32_t control;
        struct lpfc_sli *psli = &phba->sli;

        lpfc_linkdown(phba);

        /* turn on Link Attention interrupts - no CLEAR_LA needed */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
}

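/*
 * Unlike the CLEAR_LA path above, a link-down transition needs no mailbox
 * round trip before link attention can be re-enabled -- hence the
 * "no CLEAR_LA needed" comment: the port is already quiesced by
 * lpfc_linkdown().
 */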
/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        READ_LA_VAR *la;
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

        /* Check for error */
        if (mb->mbxStatus) {
                lpfc_printf_log(phba,
                                KERN_INFO,
                                LOG_LINK_EVENT,
                                "%d:1307 READ_LA mbox error x%x state x%x\n",
                                phba->brd_no,
                                mb->mbxStatus, vport->port_state);
                lpfc_mbx_issue_link_down(phba);
                phba->link_state = LPFC_HBA_ERROR;
                goto lpfc_mbx_cmpl_read_la_free_mbuf;
        }

        la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;

        memcpy(&phba->alpa_map[0], mp->virt, 128);

        spin_lock_irq(shost->host_lock);
        if (la->pb)
                vport->fc_flag |= FC_BYPASSED_MODE;
        else
                vport->fc_flag &= ~FC_BYPASSED_MODE;
        spin_unlock_irq(shost->host_lock);

        if (((phba->fc_eventTag + 1) < la->eventTag) ||
            (phba->fc_eventTag == la->eventTag)) {
                phba->fc_stat.LinkMultiEvent++;
                if (la->attType == AT_LINK_UP)
                        if (phba->fc_eventTag != 0)
                                lpfc_linkdown(phba);
        }

        phba->fc_eventTag = la->eventTag;

        if (la->attType == AT_LINK_UP) {
                phba->fc_stat.LinkUp++;
                if (phba->link_flag & LS_LOOPBACK_MODE) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                                "%d:1306 Link Up Event in loop back mode "
                                "x%x received Data: x%x x%x x%x x%x\n",
                                phba->brd_no, la->eventTag, phba->fc_eventTag,
                                la->granted_AL_PA, la->UlnkSpeed,
                                phba->alpa_map[0]);
                } else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "%d:1303 Link Up Event x%x received "
                                "Data: x%x x%x x%x x%x\n",
                                phba->brd_no, la->eventTag, phba->fc_eventTag,
                                la->granted_AL_PA, la->UlnkSpeed,
                                phba->alpa_map[0]);
                }
                lpfc_mbx_process_link_up(vport, la);
        } else {
                phba->fc_stat.LinkDown++;
                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "%d:1305 Link Down Event x%x received "
                                "Data: x%x x%x x%x\n",
                                phba->brd_no, la->eventTag, phba->fc_eventTag,
                                phba->pport->port_state, vport->fc_flag);
                lpfc_mbx_issue_link_down(phba);
        }

lpfc_mbx_cmpl_read_la_free_mbuf:
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport  *vport = pmb->vport;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

        pmb->context1 = NULL;

        /* Good status, call state machine */
        lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        lpfc_nlp_put(ndlp);

        return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp, *ndlp_fdmi;

        ndlp = (struct lpfc_nodelist *) pmb->context2;
        pmb->context1 = NULL;
        pmb->context2 = NULL;

        if (mb->mbxStatus) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_nlp_put(ndlp);

                /* FLOGI failed, so just use loop map to make discovery list */
                lpfc_disc_list_loopmap(vport);

                /* Start discovery */
                lpfc_disc_start(vport);
                return;
        }

        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

        lpfc_nlp_put(ndlp);     /* Drop the reference from the mbox */

        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
                /* This NPort has been assigned an NPort_ID by the fabric as a
                 * result of the completed fabric login.  Issue a State Change
                 * Registration (SCR) ELS request to the fabric controller
                 * (SCR_DID) so that this NPort gets RSCN events from the
                 * fabric.
                 */
                lpfc_issue_els_scr(vport, SCR_DID, 0);

                ndlp = lpfc_findnode_did(vport, NameServer_DID);
                if (!ndlp) {
                        /* Allocate a new node instance. If the pool is empty,
                         * start the discovery process and skip the Nameserver
                         * login process.  This is attempted again later on.
                         * Otherwise, issue a Port Login (PLOGI) to
                         * the NameServer.
                         */
                        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
                        if (!ndlp) {
                                lpfc_disc_start(vport);
                                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                                kfree(mp);
                                mempool_free(pmb, phba->mbox_mem_pool);
                                return;
                        } else {
                                lpfc_nlp_init(vport, ndlp, NameServer_DID);
                                ndlp->nlp_type |= NLP_FABRIC;
                        }
                }

                lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
                lpfc_issue_els_plogi(vport, NameServer_DID, 0);
                if (phba->cfg_fdmi_on) {
                        ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
                                                  GFP_KERNEL);
                        if (ndlp_fdmi) {
                                lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
                                ndlp_fdmi->nlp_type |= NLP_FABRIC;
                                ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
                                lpfc_issue_els_plogi(vport, FDMI_DID, 0);
                        }
                }
        }

        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
        struct lpfc_vport *vport = pmb->vport;

        if (mb->mbxStatus) {
                lpfc_nlp_put(ndlp);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_drop_node(vport, ndlp);

                /*
                 * RegLogin failed, so just use loop map to make discovery
                 * list
                 */
                lpfc_disc_list_loopmap(vport);

                /* Start discovery */
                lpfc_disc_start(vport);
                return;
        }

        pmb->context1 = NULL;

        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

        if (vport->port_state < LPFC_VPORT_READY) {
                /* Link up discovery requires Fabric registration. */
                lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RNN_ID);
                lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RSNN_NN);
                lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RFT_ID);
                lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RFF_ID);
        }

        vport->fc_ns_retry = 0;
        /* Good status, issue CT Request to NameServer */
        if (lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT)) {
                /* Cannot issue NameServer Query, so finish up discovery */
                lpfc_disc_start(vport);
        }
        lpfc_nlp_put(ndlp);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        return;
}

static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct fc_rport  *rport;
        struct lpfc_rport_data *rdata;
        struct fc_rport_identifiers rport_ids;
        struct lpfc_hba *phba = vport->phba;

        /* Remote port has reappeared. Re-register w/ FC transport */
        rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
        rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
        rport_ids.port_id = ndlp->nlp_DID;
        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

        /*
         * We leave our node pointer in rport->dd_data when we unregister a
         * FCP target port.  But fc_remote_port_add zeros the space to which
         * rport->dd_data points.  So, if we're reusing a previously
         * registered port, drop the reference that we took the last time we
         * registered the port.
         */
        if (ndlp->rport && ndlp->rport->dd_data &&
            *(struct lpfc_rport_data **) ndlp->rport->dd_data) {
                lpfc_nlp_put(ndlp);
        }
        ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
        if (!rport || !get_device(&rport->dev)) {
                dev_printk(KERN_WARNING, &phba->pcidev->dev,
                           "Warning: fc_remote_port_add failed\n");
                return;
        }

        /* initialize static port data */
        rport->maxframe_size = ndlp->nlp_maxframe;
        rport->supported_classes = ndlp->nlp_class_sup;
        rdata = rport->dd_data;
        rdata->pnode = lpfc_nlp_get(ndlp);

        if (ndlp->nlp_type & NLP_FCP_TARGET)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
        if (ndlp->nlp_type & NLP_FCP_INITIATOR)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

        if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
                fc_remote_port_rolechg(rport, rport_ids.roles);

        if ((rport->scsi_target_id != -1) &&
            (rport->scsi_target_id < LPFC_MAX_TARGET)) {
                ndlp->nlp_sid = rport->scsi_target_id;
        }

        return;
}

static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
        struct fc_rport *rport = ndlp->rport;
        struct lpfc_rport_data *rdata = rport->dd_data;

        if (rport->scsi_target_id == -1) {
                ndlp->rport = NULL;
                rdata->pnode = NULL;
                lpfc_nlp_put(ndlp);
                put_device(&rport->dev);
        }

        fc_remote_port_delete(rport);

        return;
}

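/*
 * Reference counting note (an interpretation of the code above): for FCP
 * targets (scsi_target_id != -1) the node reference taken in
 * lpfc_register_remote_port() is deliberately kept across
 * fc_remote_port_delete() so that rport->dd_data stays valid for the
 * transport's dev_loss/terminate callbacks; it is only dropped when the
 * rport reappears or the node is finally removed.
 */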
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        spin_lock_irq(shost->host_lock);
        switch (state) {
        case NLP_STE_UNUSED_NODE:
                vport->fc_unused_cnt += count;
                break;
        case NLP_STE_PLOGI_ISSUE:
                vport->fc_plogi_cnt += count;
                break;
        case NLP_STE_ADISC_ISSUE:
                vport->fc_adisc_cnt += count;
                break;
        case NLP_STE_REG_LOGIN_ISSUE:
                vport->fc_reglogin_cnt += count;
                break;
        case NLP_STE_PRLI_ISSUE:
                vport->fc_prli_cnt += count;
                break;
        case NLP_STE_UNMAPPED_NODE:
                vport->fc_unmap_cnt += count;
                break;
        case NLP_STE_MAPPED_NODE:
                vport->fc_map_cnt += count;
                break;
        case NLP_STE_NPR_NODE:
                vport->fc_npr_cnt += count;
                break;
        }
        spin_unlock_irq(shost->host_lock);
}

static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                       int old_state, int new_state)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (new_state == NLP_STE_UNMAPPED_NODE) {
                ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
                ndlp->nlp_type |= NLP_FC_NODE;
        }
        if (new_state == NLP_STE_MAPPED_NODE)
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
        if (new_state == NLP_STE_NPR_NODE)
                ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

        /* Transport interface */
        if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
                            old_state == NLP_STE_UNMAPPED_NODE)) {
                vport->phba->nport_event_cnt++;
                lpfc_unregister_remote_port(ndlp);
        }

        if (new_state == NLP_STE_MAPPED_NODE ||
            new_state == NLP_STE_UNMAPPED_NODE) {
                vport->phba->nport_event_cnt++;
                /*
                 * Tell the fc transport about the port, if we haven't
                 * already. If we have, and it's a scsi entity, be
                 * sure to unblock any attached scsi devices
                 */
                lpfc_register_remote_port(vport, ndlp);
        }

        /*
         * if we added to Mapped list, but the remote port
         * registration failed or assigned a target id outside
         * our presentable range - move the node to the
         * Unmapped List
         */
        if (new_state == NLP_STE_MAPPED_NODE &&
            (!ndlp->rport ||
             ndlp->rport->scsi_target_id == -1 ||
             ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
                spin_unlock_irq(shost->host_lock);
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
        }
}

static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
        static char *states[] = {
                [NLP_STE_UNUSED_NODE] = "UNUSED",
                [NLP_STE_PLOGI_ISSUE] = "PLOGI",
                [NLP_STE_ADISC_ISSUE] = "ADISC",
                [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
                [NLP_STE_PRLI_ISSUE] = "PRLI",
                [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
                [NLP_STE_MAPPED_NODE] = "MAPPED",
                [NLP_STE_NPR_NODE] = "NPR",
        };

        if (state < ARRAY_SIZE(states) && states[state])
                strlcpy(buffer, states[state], size);
        else
                snprintf(buffer, size, "unknown (%d)", state);
        return buffer;
}

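/*
 * Example: lpfc_nlp_state_name(buf, sizeof(buf), ndlp->nlp_state) fills buf
 * with "MAPPED", "NPR", ... or "unknown (<n>)" for a value outside the
 * table, and returns buf so it can be used directly as a printf argument
 * (see lpfc_nlp_set_state() below).
 */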
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                   int state)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        int  old_state = ndlp->nlp_state;
        char name1[16], name2[16];

        lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE,
                        "%d:0904 NPort state transition x%06x, %s -> %s\n",
                        vport->phba->brd_no,
                        ndlp->nlp_DID,
                        lpfc_nlp_state_name(name1, sizeof(name1), old_state),
                        lpfc_nlp_state_name(name2, sizeof(name2), state));
        if (old_state == NLP_STE_NPR_NODE &&
            (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
            state != NLP_STE_NPR_NODE)
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (old_state == NLP_STE_UNMAPPED_NODE) {
                ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
                ndlp->nlp_type &= ~NLP_FC_NODE;
        }

        if (list_empty(&ndlp->nlp_listp)) {
                spin_lock_irq(shost->host_lock);
                list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
                spin_unlock_irq(shost->host_lock);
        } else if (old_state)
                lpfc_nlp_counters(vport, old_state, -1);

        ndlp->nlp_state = state;
        lpfc_nlp_counters(vport, state, 1);
        lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}

void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        spin_lock_irq(shost->host_lock);
        list_del_init(&ndlp->nlp_listp);
        spin_unlock_irq(shost->host_lock);
        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 0);
}

void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        spin_lock_irq(shost->host_lock);
        list_del_init(&ndlp->nlp_listp);
        spin_unlock_irq(shost->host_lock);
        lpfc_nlp_put(ndlp);
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        uint32_t tmo;

        if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
                /* For FAN, timeout should be greater than edtov */
                tmo = (((phba->fc_edtov + 999) / 1000) + 1);
        } else {
                /* Normal discovery timeout should be greater than ELS/CT
                 * timeout; the FC spec states we need 3 * ratov for CT
                 * requests.
                 */
                tmo = ((phba->fc_ratov * 3) + 3);
        }

        mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_DISC_TMO;
        spin_unlock_irq(shost->host_lock);

        /* Start Discovery Timer state <hba_state> */
        lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
                        "%d:0247 Start Discovery Timer state x%x "
                        "Data: x%x x%lx x%x x%x\n",
                        phba->brd_no, vport->port_state, tmo,
                        (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
                        vport->fc_adisc_cnt);

        return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        unsigned long iflags;

        /* Turn off discovery timer if it's running */
        if (vport->fc_flag & FC_DISC_TMO) {
                spin_lock_irqsave(shost->host_lock, iflags);
                vport->fc_flag &= ~FC_DISC_TMO;
                spin_unlock_irqrestore(shost->host_lock, iflags);
                del_timer_sync(&vport->fc_disctmo);
                spin_lock_irqsave(&vport->work_port_lock, iflags);
                vport->work_port_events &= ~WORKER_DISC_TMO;
                spin_unlock_irqrestore(&vport->work_port_lock, iflags);
        }

        /* Cancel Discovery Timer state <hba_state> */
        lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
                        "%d:0248 Cancel Discovery Timer state x%x "
                        "Data: x%x x%x x%x\n",
                        phba->brd_no, vport->port_state, vport->fc_flag,
                        vport->fc_plogi_cnt, vport->fc_adisc_cnt);

        return 0;
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
                    struct lpfc_sli_ring *pring,
                    struct lpfc_iocbq *iocb,
                    struct lpfc_nodelist *ndlp)
{
        struct lpfc_sli *psli = &phba->sli;
        IOCB_t *icmd = &iocb->iocb;
        if (pring->ringno == LPFC_ELS_RING) {
                switch (icmd->ulpCommand) {
                case CMD_GEN_REQUEST64_CR:
                        if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
                                return 1;
                case CMD_ELS_REQUEST64_CR:
                        if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
                                return 1;
                case CMD_XMIT_ELS_RSP64_CX:
                        if (iocb->context1 == (uint8_t *) ndlp)
                                return 1;
                }
        } else if (pring->ringno == psli->extra_ring) {

        } else if (pring->ringno == psli->fcp_ring) {
                /* Skip match check if waiting to relogin to FCP target */
                if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
                    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
                        return 0;
                }
                if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
                        return 1;
                }
        } else if (pring->ringno == psli->next_ring) {

        }
        return 0;
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
        LIST_HEAD(completions);
        struct lpfc_sli *psli;
        struct lpfc_sli_ring *pring;
        struct lpfc_iocbq *iocb, *next_iocb;
        IOCB_t *icmd;
        uint32_t rpi, i;

        /*
         * Everything that matches on txcmplq will be returned
         * by firmware with a no rpi error.
         */
        psli = &phba->sli;
        rpi = ndlp->nlp_rpi;
        if (rpi) {
                /* Now process each ring */
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->ring[i];

                        spin_lock_irq(&phba->hbalock);
                        list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
                                                 list) {
                                /*
                                 * Check to see if iocb matches the nport we
                                 * are looking for
                                 */
                                if ((lpfc_check_sli_ndlp
                                     (phba, pring, iocb, ndlp))) {
                                        /* It matches, so deque and call compl
                                           with an error */
                                        list_move_tail(&iocb->list,
                                                       &completions);
                                        pring->txq_cnt--;
                                }
                        }
                        spin_unlock_irq(&phba->hbalock);
                }
        }

        while (!list_empty(&completions)) {
                iocb = list_get_first(&completions, struct lpfc_iocbq, list);
                list_del(&iocb->list);

                if (!iocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, iocb);
                else {
                        icmd = &iocb->iocb;
                        icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        (iocb->iocb_cmpl)(phba, iocb, iocb);
                }
        }

        return 0;
}

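/*
 * Note the two-phase pattern above, used throughout this file: iocbs are
 * moved off the txq onto a private "completions" list while the hbalock is
 * held, and their completion handlers are invoked (with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED) only after the lock has been
 * dropped, since iocb_cmpl routines may themselves take driver locks.
 */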
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t    *mbox;
        int rc;

        if (ndlp->nlp_rpi) {
                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mbox) {
                        lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
                        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        rc = lpfc_sli_issue_mbox
                                (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
                        if (rc == MBX_NOT_FINISHED)
                                mempool_free(mbox, phba->mbox_mem_pool);
                }
                lpfc_no_rpi(phba, ndlp);
                ndlp->nlp_rpi = 0;
                return 1;
        }
        return 0;
}

/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        LPFC_MBOXQ_t *mb, *nextmb;
        struct lpfc_dmabuf *mp;

        /* Cleanup node for NPort <nlp_DID> */
        lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
                        "%d:0900 Cleanup node for NPort x%x "
                        "Data: x%x x%x x%x\n",
                        phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
                        ndlp->nlp_state, ndlp->nlp_rpi);

        lpfc_dequeue_node(vport, ndlp);

        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
        if ((mb = phba->sli.mbox_active)) {
                if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mb->context2 = NULL;
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                }
        }

        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
                if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mp = (struct lpfc_dmabuf *) (mb->context1);
                        if (mp) {
                                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
                                kfree(mp);
                        }
                        list_del(&mb->list);
                        mempool_free(mb, phba->mbox_mem_pool);
                        lpfc_nlp_put(ndlp);
                }
        }
        spin_unlock_irq(&phba->hbalock);

        lpfc_els_abort(phba, ndlp);
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);

        ndlp->nlp_last_elscmd = 0;
        del_timer_sync(&ndlp->nlp_delayfunc);

        if (!list_empty(&ndlp->els_retry_evt.evt_listp))
                list_del_init(&ndlp->els_retry_evt.evt_listp);

        lpfc_unreg_rpi(vport, ndlp);

        return 0;
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_rport_data *rdata;

        if (ndlp->nlp_flag & NLP_DELAY_TMO) {
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        }

        lpfc_cleanup_node(vport, ndlp);

        /*
         * We should never get here with a non-NULL ndlp->rport.  But
         * if we do, drop the reference to the rport.  That seems the
         * intelligent thing to do.
         */
        if (ndlp->rport && !(vport->load_flag & FC_UNLOADING)) {
                put_device(&ndlp->rport->dev);
                rdata = ndlp->rport->dd_data;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
        }
}

static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
              uint32_t did)
{
        D_ID mydid, ndlpdid, matchdid;

        if (did == Bcast_DID)
                return 0;

        if (ndlp->nlp_DID == 0) {
                return 0;
        }

        /* First check for Direct match */
        if (ndlp->nlp_DID == did)
                return 1;

        /* Next check for area/domain identically equals 0 match */
        mydid.un.word = vport->fc_myDID;
        if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
                return 0;
        }

        matchdid.un.word = did;
        ndlpdid.un.word = ndlp->nlp_DID;
        if (matchdid.un.b.id == ndlpdid.un.b.id) {
                if ((mydid.un.b.domain == matchdid.un.b.domain) &&
                    (mydid.un.b.area == matchdid.un.b.area)) {
                        if ((ndlpdid.un.b.domain == 0) &&
                            (ndlpdid.un.b.area == 0)) {
                                if (ndlpdid.un.b.id)
                                        return 1;
                        }
                        return 0;
                }

                matchdid.un.word = ndlp->nlp_DID;
                if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
                    (mydid.un.b.area == ndlpdid.un.b.area)) {
                        if ((matchdid.un.b.domain == 0) &&
                            (matchdid.un.b.area == 0)) {
                                if (matchdid.un.b.id)
                                        return 1;
                        }
                }
        }
        return 0;
}

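/*
 * D_ID layout reminder for the matching logic above: a Fibre Channel
 * address is 24 bits, split (high to low) into 8-bit domain, area and id
 * fields.  The extra checks accept a match when one side was assigned a
 * bare AL_PA (domain == 0 && area == 0, i.e. a private loop address) that
 * agrees with the low byte of the other side.
 */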
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp;
        uint32_t data1;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                if (lpfc_matchdid(vport, ndlp, did)) {
                        data1 = (((uint32_t) ndlp->nlp_state << 24) |
                                 ((uint32_t) ndlp->nlp_xri << 16) |
                                 ((uint32_t) ndlp->nlp_type << 8) |
                                 ((uint32_t) ndlp->nlp_rpi & 0xff));
                        lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
                                        "%d:0929 FIND node DID "
                                        " Data: x%p x%x x%x x%x\n",
                                        phba->brd_no,
                                        ndlp, ndlp->nlp_DID,
                                        ndlp->nlp_flag, data1);
                        return ndlp;
                }
        }

        /* FIND node did <did> NOT FOUND */
        lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
                        "%d:0932 FIND node did x%x NOT FOUND.\n",
                        phba->brd_no, did);
        return NULL;
}

struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;

        spin_lock_irq(shost->host_lock);
        ndlp = __lpfc_findnode_did(vport, did);
        spin_unlock_irq(shost->host_lock);
        return ndlp;
}

struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;

        ndlp = lpfc_findnode_did(vport, did);
        if (!ndlp) {
                if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
                    lpfc_rscn_payload_check(vport, did) == 0)
                        return NULL;
                ndlp = (struct lpfc_nodelist *)
                        mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
                if (!ndlp)
                        return NULL;
                lpfc_nlp_init(vport, ndlp, did);
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
                return ndlp;
        }
        if (vport->fc_flag & FC_RSCN_MODE) {
                if (lpfc_rscn_payload_check(vport, did)) {
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                        spin_unlock_irq(shost->host_lock);

                        /* Since this node is marked for discovery,
                         * delay timeout is not needed.
                         */
                        if (ndlp->nlp_flag & NLP_DELAY_TMO)
                                lpfc_cancel_retry_delay_tmo(vport, ndlp);
                } else
                        ndlp = NULL;
        } else {
                if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
                    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
                        return NULL;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
        }
        return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        int j;
        uint32_t alpa, index;

        if (!lpfc_is_link_up(phba))
                return;

        if (phba->fc_topology != TOPOLOGY_LOOP)
                return;

        /* Check for loop map present or not */
        if (phba->alpa_map[0]) {
                for (j = 1; j <= phba->alpa_map[0]; j++) {
                        alpa = phba->alpa_map[j];
                        if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
                                continue;
                        lpfc_setup_disc_node(vport, alpa);
                }
        } else {
                /* No alpamap, so try all alpa's */
                for (j = 0; j < FC_MAXLOOP; j++) {
                        /* If cfg_scan_down is set, start from highest
                         * ALPA (0xef) to lowest (0x1).
                         */
                        if (phba->cfg_scan_down)
                                index = j;
                        else
                                index = FC_MAXLOOP - j - 1;
                        alpa = lpfcAlpaArray[index];
                        if ((vport->fc_myDID & 0xff) == alpa)
                                continue;
                        lpfc_setup_disc_node(vport, alpa);
                }
        }
        return;
}

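/*
 * alpa_map[0] holds the count of map entries that follow (as returned by
 * READ_LA), so a non-zero first byte means the firmware supplied a
 * positional loop map and only those addresses are probed; otherwise every
 * AL_PA in lpfcAlpaArray is tried, skipping our own address.
 */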
static void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        LPFC_MBOXQ_t *mbox;
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
        struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
        struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
        int rc;

        /* Link up discovery */
        if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
                phba->link_state = LPFC_CLEAR_LA;
                lpfc_clear_la(phba, mbox);
                mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
                mbox->vport = vport;
                rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
                                                      MBX_STOP_IOCB));
                if (rc == MBX_NOT_FINISHED) {
                        mempool_free(mbox, phba->mbox_mem_pool);
                        lpfc_disc_flush_list(vport);
                        extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        vport->port_state = LPFC_VPORT_READY;
                }
        }
}

/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        struct lpfc_nodelist *ndlp, *next_ndlp;
        uint32_t num_sent;
        uint32_t clear_la_pending;
        int did_changed;

        if (!lpfc_is_link_up(phba))
                return;

        if (phba->link_state == LPFC_CLEAR_LA)
                clear_la_pending = 1;
        else
                clear_la_pending = 0;

        if (vport->port_state < LPFC_VPORT_READY)
                vport->port_state = LPFC_DISC_AUTH;

        lpfc_set_disctmo(vport);

        if (vport->fc_prevDID == vport->fc_myDID)
                did_changed = 0;
        else
                did_changed = 1;

        vport->fc_prevDID = vport->fc_myDID;
        vport->num_disc_nodes = 0;

        /* Start Discovery state <hba_state> */
        lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
                        "%d:0202 Start Discovery hba state x%x "
                        "Data: x%x x%x x%x\n",
                        phba->brd_no, vport->port_state, vport->fc_flag,
                        vport->fc_plogi_cnt, vport->fc_adisc_cnt);

        /* If our did changed, we MUST do PLOGI */
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
                    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
                    did_changed) {
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                        spin_unlock_irq(shost->host_lock);
                }
        }

        /* First do ADISCs - if any */
        num_sent = lpfc_els_disc_adisc(vport);

        if (num_sent)
                return;

        if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
                if (vport->port_type == LPFC_PHYSICAL_PORT) {
                        /* If we get here, there is nothing to ADISC */
                        printk(KERN_ERR "%s (%d): do clear_la\n",
                               __FUNCTION__, __LINE__);
                        lpfc_issue_clear_la(phba, vport);
                } else if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {

                        vport->num_disc_nodes = 0;
                        /* go thru NPR nodes and issue ELS PLOGIs */
                        if (vport->fc_npr_cnt)
                                lpfc_els_disc_plogi(vport);

                        if (!vport->num_disc_nodes) {
                                spin_lock_irq(shost->host_lock);
                                vport->fc_flag &= ~FC_NDISC_ACTIVE;
                                spin_unlock_irq(shost->host_lock);
                        }
                        printk(KERN_ERR "%s (%d): vport ready\n",
                               __FUNCTION__, __LINE__);
                        vport->port_state = LPFC_VPORT_READY;
                }
        } else {
                /* Next do PLOGIs - if any */
                num_sent = lpfc_els_disc_plogi(vport);

                if (num_sent)
                        return;

                if (vport->fc_flag & FC_RSCN_MODE) {
                        /* Check to see if more RSCNs came in while we
                         * were processing this one.
                         */
                        if ((vport->fc_rscn_id_cnt == 0) &&
                            (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
                                spin_lock_irq(shost->host_lock);
                                vport->fc_flag &= ~FC_RSCN_MODE;
                                spin_unlock_irq(shost->host_lock);
                        } else
                                lpfc_els_handle_rscn(vport);
                }
        }
        return;
}

/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
        LIST_HEAD(completions);
        struct lpfc_sli *psli;
        IOCB_t *icmd;
        struct lpfc_iocbq    *iocb, *next_iocb;
        struct lpfc_sli_ring *pring;

        psli = &phba->sli;
        pring = &psli->ring[LPFC_ELS_RING];

        /* Error matching iocb on txq or txcmplq
         * First check the txq.
         */
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
                if (iocb->context1 != ndlp) {
                        continue;
                }
                icmd = &iocb->iocb;
                if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
                    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

                        list_move_tail(&iocb->list, &completions);
                        pring->txq_cnt--;
                }
        }

        /* Next check the txcmplq */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
                if (iocb->context1 != ndlp) {
                        continue;
                }
                icmd = &iocb->iocb;
                if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
                    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
                        lpfc_sli_issue_abort_iotag(phba, pring, iocb);
                }
        }
        spin_unlock_irq(&phba->hbalock);

        while (!list_empty(&completions)) {
                iocb = list_get_first(&completions, struct lpfc_iocbq, list);
                list_del(&iocb->list);

                if (!iocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, iocb);
                else {
                        icmd = &iocb->iocb;
                        icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        (iocb->iocb_cmpl) (phba, iocb, iocb);
                }
        }
}

void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp, *next_ndlp;
        struct lpfc_hba *phba = vport->phba;

        if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
                                         nlp_listp) {
                        if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
                            ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
                                lpfc_free_tx(phba, ndlp);
                                lpfc_nlp_put(ndlp);
                        }
                }
        }
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
        struct lpfc_hba   *phba = vport->phba;
        unsigned long flags = 0;

        if (unlikely(!phba))
                return;

        if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
                spin_lock_irqsave(&vport->work_port_lock, flags);
                vport->work_port_events |= WORKER_DISC_TMO;
                spin_unlock_irqrestore(&vport->work_port_lock, flags);

                if (phba->work_wait)
                        wake_up(phba->work_wait);
        }
        return;
}

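/*
 * lpfc_disc_timeout() runs in timer (softirq) context, so it does no real
 * work itself: it just latches WORKER_DISC_TMO and wakes the worker thread,
 * which calls lpfc_disc_timeout_handler() below from process context, where
 * mailbox commands and sleeping allocations are allowed.
 */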
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_sli   *psli = &phba->sli;
        struct lpfc_nodelist *ndlp, *next_ndlp;
        LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
        int rc, clrlaerr = 0;

        if (!(vport->fc_flag & FC_DISC_TMO))
                return;

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_DISC_TMO;
        spin_unlock_irq(shost->host_lock);

        printk(KERN_ERR "%s (%d): link_state = %d, port_state = %d\n",
               __FUNCTION__, __LINE__, phba->link_state, vport->port_state);
        switch (vport->port_state) {

        case LPFC_LOCAL_CFG_LINK:
        /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
         * FAN
         */
                /* FAN timeout */
                lpfc_printf_log(phba,
                                KERN_WARNING,
                                LOG_DISCOVERY,
                                "%d:0221 FAN timeout\n",
                                phba->brd_no);

                /* Start discovery by sending FLOGI, clean up old rpis */
                list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
                                         nlp_listp) {
                        if (ndlp->nlp_state != NLP_STE_NPR_NODE)
                                continue;
                        if (ndlp->nlp_type & NLP_FABRIC) {
                                /* Clean up the ndlp on Fabric connections */
                                lpfc_drop_node(vport, ndlp);
                        } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                                /* Fail outstanding IO now since device
                                 * is marked for PLOGI.
                                 */
                                lpfc_unreg_rpi(vport, ndlp);
                        }
                }
                vport->port_state = LPFC_FLOGI;
                lpfc_set_disctmo(vport);
                lpfc_initial_flogi(vport);
                break;

        case LPFC_FLOGI:
        /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
                /* Initial FLOGI timeout */
                lpfc_printf_log(phba,
                                KERN_ERR,
                                LOG_DISCOVERY,
                                "%d:0222 Initial FLOGI timeout\n",
                                phba->brd_no);

                /* Assume no Fabric and go on with discovery.
                 * Check for outstanding ELS FLOGI to abort.
                 */

                /* FLOGI failed, so just use loop map to make discovery list */
                lpfc_disc_list_loopmap(vport);

                /* Start discovery */
                lpfc_disc_start(vport);
                break;

        case LPFC_FABRIC_CFG_LINK:
        /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
           NameServer login */
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                                "%d:0223 Timeout while waiting for NameServer "
                                "login\n", phba->brd_no);

                /* Next look for NameServer ndlp */
                ndlp = lpfc_findnode_did(vport, NameServer_DID);
                if (ndlp)
                        lpfc_nlp_put(ndlp);
                /* Start discovery */
                lpfc_disc_start(vport);
                break;

        case LPFC_NS_QRY:
        /* Check for wait for NameServer Rsp timeout */
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                                "%d:0224 NameServer Query timeout "
                                "Data: x%x x%x\n",
                                phba->brd_no,
                                vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

                ndlp = lpfc_findnode_did(vport, NameServer_DID);
                if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
                        if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
                                /* Try it one more time */
                                rc = lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT);
                                if (rc == 0)
                                        break;
                        }
                        vport->fc_ns_retry = 0;
                }

                /* Nothing to authenticate, so CLEAR_LA right now */
                clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!clearlambox) {
                        clrlaerr = 1;
                        lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                                        "%d:0226 Device Discovery "
                                        "completion error\n",
                                        phba->brd_no);
                        phba->link_state = LPFC_HBA_ERROR;
                        break;
                }

                phba->link_state = LPFC_CLEAR_LA;
                lpfc_clear_la(phba, clearlambox);
                printk(KERN_ERR "%s (%d): do clear_la\n",
                       __FUNCTION__, __LINE__);
                clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
                clearlambox->vport = vport;
                rc = lpfc_sli_issue_mbox(phba, clearlambox,
                                         (MBX_NOWAIT | MBX_STOP_IOCB));
                if (rc == MBX_NOT_FINISHED) {
                        mempool_free(clearlambox, phba->mbox_mem_pool);
                        clrlaerr = 1;
                        break;
                }

                /* Setup and issue mailbox INITIALIZE LINK command */
                initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!initlinkmbox) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                                        "%d:0206 Device Discovery "
                                        "completion error\n",
                                        phba->brd_no);
                        phba->link_state = LPFC_HBA_ERROR;
                        break;
                }

                lpfc_linkdown(phba);
                lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
                               phba->cfg_link_speed);
                initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
                rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
                                         (MBX_NOWAIT | MBX_STOP_IOCB));
                lpfc_set_loopback_flag(phba);
                if (rc == MBX_NOT_FINISHED)
                        mempool_free(initlinkmbox, phba->mbox_mem_pool);

                break;

        case LPFC_DISC_AUTH:
        /* Node Authentication timeout */
                lpfc_printf_log(phba,
                                KERN_ERR,
                                LOG_DISCOVERY,
                                "%d:0227 Node Authentication timeout\n",
                                phba->brd_no);
                lpfc_disc_flush_list(vport);

                clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!clearlambox) {
                        clrlaerr = 1;
                        lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                                        "%d:0207 Device Discovery "
                                        "completion error\n",
                                        phba->brd_no);
                        phba->link_state = LPFC_HBA_ERROR;
                        break;
                }
                phba->link_state = LPFC_CLEAR_LA;
                lpfc_clear_la(phba, clearlambox);
                printk(KERN_ERR "%s (%d): do clear_la\n",
                       __FUNCTION__, __LINE__);
                clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
                clearlambox->vport = vport;
                rc = lpfc_sli_issue_mbox(phba, clearlambox,
                                         (MBX_NOWAIT | MBX_STOP_IOCB));
                if (rc == MBX_NOT_FINISHED) {
                        mempool_free(clearlambox, phba->mbox_mem_pool);
                        clrlaerr = 1;
                }
                break;

        case LPFC_VPORT_READY:
                if (vport->fc_flag & FC_RSCN_MODE) {
                        lpfc_printf_log(phba,
                                        KERN_ERR,
                                        LOG_DISCOVERY,
                                        "%d:0231 RSCN timeout Data: x%x x%x\n",
                                        phba->brd_no,
                                        vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

                        /* Cleanup any outstanding ELS commands */
                        lpfc_els_flush_cmd(vport);

                        lpfc_els_flush_rscn(vport);
                        lpfc_disc_flush_list(vport);
                }
                break;

        case LPFC_STATE_UNKNOWN:
        case LPFC_NS_REG:
        case LPFC_BUILD_DISC_LIST:
                lpfc_printf_log(phba,
                                KERN_ERR,
                                LOG_DISCOVERY,
                                "%d:0229 Unexpected discovery timeout, vport "
                                "state x%x\n",
                                phba->brd_no, vport->port_state);
                break;
        }

        switch (phba->link_state) {
        case LPFC_CLEAR_LA:
                /* CLEAR LA timeout */
                lpfc_printf_log(phba,
                                KERN_ERR,
                                LOG_DISCOVERY,
                                "%d:0228 CLEAR LA timeout\n",
                                phba->brd_no);
                clrlaerr = 1;
                break;

        case LPFC_LINK_UNKNOWN:
        case LPFC_WARM_START:
        case LPFC_INIT_START:
        case LPFC_INIT_MBX_CMDS:
        case LPFC_LINK_DOWN:
        case LPFC_LINK_UP:
        case LPFC_HBA_ERROR:
                lpfc_printf_log(phba,
                                KERN_ERR,
                                LOG_DISCOVERY,
                                "%d:0230 Unexpected timeout, hba link "
                                "state x%x\n",
                                phba->brd_no, phba->link_state);
                clrlaerr = 1;
                break;
        }

        if (clrlaerr) {
                lpfc_disc_flush_list(vport);
                psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                printk(KERN_ERR "%s (%d): vport ready\n",
                       __FUNCTION__, __LINE__);
                vport->port_state = LPFC_VPORT_READY;
        }

        return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
        struct lpfc_vport    *vport = pmb->vport;

        pmb->context1 = NULL;

        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

        /*
         * Start issuing Fabric-Device Management Interface (FDMI) command to
         * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
         * fdmi-on=2 (supporting RPA/hostname)
         */
        if (phba->cfg_fdmi_on == 1)
                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
        else
                mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

        /* Mailbox took a reference to the node */
        lpfc_nlp_put(ndlp);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        return;
}

static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
        uint16_t *rpi = param;

        return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
        return memcmp(&ndlp->nlp_portname, param,
                      sizeof(ndlp->nlp_portname)) == 0;
}

struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
        struct lpfc_nodelist *ndlp;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
                    filter(ndlp, param))
                        return ndlp;
        }
        return NULL;
}

/*
 * Search node lists for a remote port matching filter criteria.
 * This routine is used when the caller does NOT have host_lock.
 */
struct lpfc_nodelist *
lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;

        spin_lock_irq(shost->host_lock);
        ndlp = __lpfc_find_node(vport, filter, param);
        spin_unlock_irq(shost->host_lock);
        return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi is
 * found, it returns the node list element pointer, else it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
        return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;

        spin_lock_irq(shost->host_lock);
        ndlp = __lpfc_findnode_rpi(vport, rpi);
        spin_unlock_irq(shost->host_lock);
        return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer, else it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;

        spin_lock_irq(shost->host_lock);
        ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
        spin_unlock_irq(shost->host_lock);
        return ndlp;
}

void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
              uint32_t did)
{
        memset(ndlp, 0, sizeof (struct lpfc_nodelist));
        INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
        init_timer(&ndlp->nlp_delayfunc);
        ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
        ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
        ndlp->nlp_DID = did;
        ndlp->vport = vport;
        ndlp->nlp_sid = NLP_NO_SID;
        INIT_LIST_HEAD(&ndlp->nlp_listp);
        kref_init(&ndlp->kref);
        return;
}

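/*
 * Node lifetime: lpfc_nlp_init() starts the kref at 1.  Every long-lived
 * pointer to the node (rport dd_data, mailbox contexts, ...) should be
 * taken via lpfc_nlp_get() and released via lpfc_nlp_put(); when the last
 * reference is dropped, lpfc_nlp_release() below tears the node down and
 * returns it to the mempool.
 */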
static void
lpfc_nlp_release(struct kref *kref)
{
        struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
                                                  kref);
        lpfc_nlp_remove(ndlp->vport, ndlp);
        mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
}

struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
        if (ndlp)
                kref_get(&ndlp->kref);
        return ndlp;
}

int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
        return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}