/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/**
 * lpfc_update_stats: Update statistical data for the command completion.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}
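
	/*
	 * Pick a latency bucket.  LPFC_LINEAR_BUCKET buckets are evenly
	 * sized (bucket_step msec wide, starting at bucket_base); otherwise
	 * the buckets grow as powers of two, so bucket i covers latencies
	 * up to bucket_base + (1 << i) * bucket_step msec.
	 */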
	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base) /
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
 *                                         event.
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);
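
	/*
	 * Mark the ramp-down event pending only if it is not already set;
	 * work_port_lock serializes this flag against the worker thread
	 * that consumes it, so the worker is woken at most once per event.
	 */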
	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for @phba vport. It posts
 * at most 1 event every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time. This routine wakes up the worker thread of @phba
 * to process the WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;

	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. It reduces the queue depth for all scsi devices on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth, old_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;
	struct lpfc_rport_data *rdata;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
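				/*
				 * Shrink the depth in proportion to the
				 * fraction of recent commands that hit a
				 * resource error; always drop by at least
				 * one.
				 */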
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				old_queue_depth = sdev->queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
				rdata = sdev->hostdata;
				if (rdata)
					lpfc_send_sdev_queuedepth_change_event(
						phba, vports[i],
						rdata->pnode,
						sdev->lun, old_queue_depth,
						new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. It increases the queue depth for all scsi devices on each
 * vport associated with @phba by 1. This routine also sets @phba
 * num_rsrc_err and num_cmd_success to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;
	struct lpfc_rport_data *rdata;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
				rdata = sdev->hostdata;
				if (rdata)
					lpfc_send_sdev_queuedepth_change_event(
						phba, vports[i],
						rdata->pnode,
						sdev->lun,
						sdev->queue_depth - 1,
						sdev->queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block: set all scsi hosts to block state.
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to the blocked
 * state by invoking the fc_remote_port_delete() routine. It is invoked
 * with EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf: Scsi buffer allocator.
 * @vport: The virtual port for which this call is being executed.
 *
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
 * contains information to build the IOCB. The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf data structure - Success
 **/
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
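
	/*
	 * The pool buffer is carved up as:
	 *   [ struct fcp_cmnd | struct fcp_rsp | BPL entries ... ]
	 * so the virtual and physical pointers below are simple offsets
	 * from the start of the DMA buffer.
	 */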
	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys_fcp_cmd = psb->dma_handle;
	pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
	pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
	 * list bdes. Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

	/* Setup the physical region for the FCP RSP */
	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	if (phba->sli_rev == 3) {
		/* fill in immediate fcp command BDE */
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
						       unsli3.fcp_ext.icd);
		iocb->un.fcpi64.bdl.addrHigh = 0;
		iocb->ulpBdeCount = 0;
		iocb->ulpLe = 0;
		/* fill in response BDE */
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
						sizeof(struct fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrLow =
						putPaddrLow(pdma_phys_fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrHigh =
						putPaddrHigh(pdma_phys_fcp_rsp);
	} else {
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
	}
	iocb->ulpClass = CLASS3;

	return psb;
}

/**
 * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list list of Hba.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of @phba's
 * lpfc_scsi_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to hba lpfc_scsi_buf_list list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases the @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd. It scans through the sg elements and formats
 * the bdes. This routine also initializes all IOCB fields which are dependent
 * on the scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages. They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg. Config %d, seg_cnt %d",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command. Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
	return 0;
}

/**
 * lpfc_send_scsi_error_event: Posts an event when there is SCSI error.
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
	    (cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
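		/*
		 * Fixed-format sense data: byte 2 carries the sense key,
		 * bytes 12 and 13 the additional sense code (ASC) and
		 * qualifier (ASCQ).
		 */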
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of the scatter gather list of the scsi
 * command field of @psb.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider. (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg. There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}

/**
 * lpfc_handle_fcp_err: FCP response handler.
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process a response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command
 * based upon the SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd. The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0716 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "0735 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command. Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "0717 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0720 FCP command x%x residual overrun error. "
				 "Data: x%x x%x \n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "0734 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}

/**
 * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns the scsi command result by looking into the response
 * IOCB status field appropriately. This routine handles the QUEUE FULL
 * condition as well by ramping down the device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0729 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				      &phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
	sdev = cmd->device;
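
	/*
	 * Target-level throttle: if this command took longer than
	 * cfg_max_scsicmpl_time, clamp the node's cmd_qdepth down to the
	 * number of commands currently outstanding (bounded below by
	 * LPFC_MIN_TGT_QDEPTH); otherwise grow it back periodically by
	 * LPFC_TGTQ_RAMPUP_PCENT percent up to LPFC_MAX_TGT_QDEPTH.
	 */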
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		       msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(sdev->host->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
			    (atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
			    ((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
		    time_after(jiffies, pnode->last_change_time +
			       msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(sdev->host->host_lock, flags);
			pnode->cmd_qdepth += pnode->cmd_qdepth *
				LPFC_TGTQ_RAMPUP_PCENT / 100;
			if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
				pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(sdev->host->host_lock, flags);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(sdev->host->host_lock, flags);
		lpfc_cmd->pCmd = NULL;
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, sdev);
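
	/*
	 * On clean completions, consider ramping the LUN queue depth back
	 * up; this only fires after LPFC_Q_RAMP_UP_INTERVAL seconds free of
	 * both queue-full reports and previous ramp-ups for this node.
	 */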
	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
	    ((jiffies - pnode->last_ramp_up_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
		lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
			0xFFFFFFFF,
			sdev->queue_depth - 1, sdev->queue_depth);
	}

	/*
	 * Check for queue full. If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
	    NLP_CHK_NODE_ACT(pnode)) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
			lpfc_send_sdev_queuedepth_change_event(phba, vport,
				pnode, 0xFFFFFFFF,
				depth+1, depth);
		}
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(sdev->host->host_lock, flags);
	lpfc_cmd->pCmd = NULL;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command to be sent.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes the fcp_cmnd and iocb data structures from the
 * scsi command to be transferred.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
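		/*
		 * For reads, ulpPU = PARM_READ_CHECK with fcpi_parm set to
		 * the expected transfer length lets the HBA cross-check the
		 * number of bytes actually moved; writes carry no such check.
		 */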
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3)
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

/**
 * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3)
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}

/**
 * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for the device reset and
 * target reset paths. It releases the scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_scsi_tgt_reset: Target reset handler.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
 * @vport: The virtual port for which this call is being executed.
 * @tgt_id: Target ID.
 * @lun: Lun number.
 * @rdata: Pointer to lpfc_rport_data.
 *
 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 **/
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;
	int status;

	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;

	lpfc_cmd->rdata = rdata;
	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					      FCP_TARGET_RESET);
	if (!status)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		} else
			ret = FAILED;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

/**
 * lpfc_info: Info entry point of scsi_host_template data structure.
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about the hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer: Routine to modify fcp_poll timer of hba.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba *phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout: Restart polling timer.
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP Ring interrupt is disabled.
 **/
void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand: Queuecommand entry point of Scsi Host Template data
 * structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to done routine.
 *
 * Driver registers this routine with the scsi midlayer to submit a @cmnd for
 * processing. This routine prepares an IOCB from the scsi command and
 * provides it to the firmware. The @done callback is invoked after the
 * driver has finished processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
		goto out_fail_command;
	}
	if (vport->cfg_max_scsicmpl_time &&
	    (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
		goto out_host_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
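
	/*
	 * SLI_IOCB_RET_IOCB asks the SLI layer to hand the iocb back on
	 * failure instead of parking it on the txq, so a full ring surfaces
	 * here as an error and the command is returned host-busy for the
	 * midlayer to retry.
	 */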
	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

/**
 * lpfc_block_error_handler: Routine to block error handler.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine blocks execution until the fc_rport state is no longer
 * FC_PORTSTATE_BLOCKED.
 **/
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}

/**
 * lpfc_abort_handler: Eh_abort_handler entry point of Scsi Host Template data
 * structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
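	/*
	 * With the link up an ABTS can reach the target, so a true abort
	 * (CMD_ABORT_XRI_CN) is issued; with the link down the exchange is
	 * simply closed locally (CMD_CLOSE_XRI_CN) since no ABTS can be
	 * sent on the wire.
	 */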
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));

	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}

/**
 * lpfc_device_reset_handler: eh_device_reset entry point of Scsi Host Template
 * data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	unsigned long later;
	int ret = SUCCESS;
	int status;
	int cnt;
	struct lpfc_scsi_event_header scsi_event;

	lpfc_block_error_handler(cmnd);
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			break;
		pnode = rdata->pnode;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		LPFC_NL_VENDOR_ID);

	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0721 LUN Reset rport "
				 "failure: msec x%x rdata x%p\n",
				 jiffies_to_msecs(jiffies - later), rdata);
		return FAILED;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
					      cmnd->device->lun,
					      FCP_TARGET_RESET);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status == IOCB_TIMEDOUT) {
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		ret = TIMEOUT_ERROR;
	} else {
		if (status != IOCB_SUCCESS)
			ret = FAILED;
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, iocbqrsp);
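
	/*
	 * The target should have aborted everything outstanding as part of
	 * the reset; count what is still in flight, abort it explicitly,
	 * and poll until it drains or twice the devloss timeout passes.
	 */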
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_TGT);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_TGT);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_TGT);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}
	return ret;
}

/**
 * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
 * Template data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine issues a target reset to every target on @cmnd->device->host.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = SUCCESS, status = SUCCESS, i;
	int cnt;
	struct lpfc_scsi_buf *lpfc_cmd;
	unsigned long later;
	struct lpfc_scsi_event_header scsi_event;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		LPFC_NL_VENDOR_ID);

	lpfc_block_error_handler(cmnd);
	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver. Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;
		lpfc_cmd = lpfc_get_scsi_buf(phba);
		if (lpfc_cmd) {
			lpfc_cmd->timeout = 60;
			status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
						     cmnd->device->lun,
						     ndlp->rport->dd_data);
			if (status != TIMEOUT_ERROR)
				lpfc_release_scsi_buf(phba, lpfc_cmd);
		}
		if (!lpfc_cmd || status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets. Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
 * structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. This routine also makes sure scsi
 * buffers are not allocated beyond the HBA limit conveyed to the midlayer.
 * This list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers. Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure. The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra. This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d. "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0706 Failed to allocate "
					 "command buffer\n");
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

/**
 * lpfc_slave_configure: slave_configure entry point of Scsi Host Template data
 * structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 *   - Tag command queuing support for @sdev if supported.
 *   - Dev loss time out value of fc_rport.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device. Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};
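
/*
 * The physical-port and vport templates below are identical except for
 * their sysfs attribute arrays (lpfc_hba_attrs vs. lpfc_vport_attrs).
 */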
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
};