/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* job waiting for this event to finish */
	struct fc_bsg_job *set_job;
};
struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};
struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;

	/* job waiting for this mbox command to finish */
	struct fc_bsg_job *set_job;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
struct bsg_job_data {
	uint32_t type;
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};
#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
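
/*
 * Note: the NULL-pointer cast above is the classic hand-rolled offsetof
 * idiom; assuming <linux/stddef.h> is pulled in, an equivalent
 * definition would be:
 *
 *	#define ELX_LOOPBACK_HEADER_SZ \
 *		offsetof(struct lpfc_sli_ct_request, un)
 */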
struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};
/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	unsigned long iflags;
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	iocb = &dd_data->context_un.iocb;
	job = iocb->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	bmp = iocb->bmp;
	rspiocbq = iocb->rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = iocb->ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
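
/*
 * A minimal sketch (not driver code) of the completion-vs-timeout
 * handshake used above: the issuing path publishes dd_data through
 * cmdiocbq->context1 and job->dd_data, and whichever of the completion
 * handler or the fc transport timeout handler runs first clears
 * job->dd_data under ct_ev_lock so the other side does not complete
 * the job a second time.
 */
#if 0
static void completion_side_sketch(struct lpfc_hba *phba,
				   struct lpfc_iocbq *cmdiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {		/* timeout side already replied */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}
	job = dd_data->context_un.iocb.set_job;
	job->dd_data = NULL;	/* timeout handler must not reply now */
	/* ... tear down resources, set job->reply->result ... */
	job->job_done(job);	/* complete the job back to userspace */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
#endif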
/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;
	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_bmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	cmd = &cmdiocbq->iocb;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}
	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;
	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
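
/*
 * Layout sketch (illustrative) of what lpfc_bsg_send_mgmt_cmd builds:
 * one mbuf (bmp) holds the buffer pointer list, with one ulp_bde64
 * entry per mapped scatterlist segment, request segments first:
 *
 *	bmp->virt -> BDE[0 .. request_nseg-1]  BUFF_TYPE_BDE_64,  payload out
 *	             BDE[.. + reply_nseg-1]    BUFF_TYPE_BDE_64I, payload in
 *
 * The GEN_REQUEST64 iocb then carries a single BUFF_TYPE_BLP_64
 * descriptor pointing at bmp->phys, with bdeSize set to
 * (request_nseg + reply_nseg) * sizeof(struct ulp_bde64).
 */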
/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	job = dd_data->context_un.iocb.set_job;
	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
	rspiocbq = dd_data->context_un.iocb.rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (job->reply->result == -EAGAIN)
		rc = -EAGAIN;
	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
		job->reply->reply_payload_rcv_len =
			rsp->un.elsreq64.bdl.bdeSize;
	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
		job->reply->reply_payload_rcv_len =
			sizeof(struct fc_bsg_ctels_reply);
		/* LS_RJT data returned in word 4 */
		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
		els_reply = &job->reply->reply_data.ctels_reply;
		els_reply->status = FC_CTELS_STATUS_REJECT;
		els_reply->rjt_data.action = rjt_data[3];
		els_reply->rjt_data.reason_code = rjt_data[2];
		els_reply->rjt_data.reason_explanation = rjt_data[1];
		els_reply->rjt_data.vendor_unique = rjt_data[0];
	} else
		rc = -EIO;
	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *rsp;
	uint16_t rpi = 0;
	struct lpfc_dmabuf *pcmd;
	struct lpfc_dmabuf *prsp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct ulp_bde64 *bpl;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;
	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		lpfc_nlp_put(ndlp);
		rc = -ENOMEM;
		goto free_dd_data;
	}

	rsp = &rspiocbq->iocb;
	rpi = ndlp->nlp_rpi;

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto free_rspiocbq;
	}
	/* prep els iocb set context1 to the ndlp, context2 to the command
	 * dmabuf, context3 holds the data dmabuf
	 */
	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(prsp);
	cmdiocbq->context2 = NULL;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	bpl = (struct ulp_bde64 *) pbuflist->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;

	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = NULL;
	dd_data->context_un.iocb.ndlp = ndlp;
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt);
}
/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}
/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}
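
/*
 * Reference-count lifecycle sketch (illustrative only): the creator's
 * reference comes from kref_init() in lpfc_bsg_event_new(), each waiter
 * takes one more with lpfc_bsg_event_ref(), and lpfc_bsg_event_free()
 * runs only when the final lpfc_bsg_event_unref() drops the count to 0.
 */
#if 0
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, reg_id, req_id); /* kref=1 */
	lpfc_bsg_event_ref(evt);	/* waiter registered, kref=2 */
	/* ... events produced and consumed ... */
	lpfc_bsg_event_unref(evt);	/* release ref, kref=1 */
	lpfc_bsg_event_unref(evt);	/* delete, kref=0 -> event_free */
#endif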
/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}
/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver internal I/O ring.
 * @piocbq: Pointer to the iocbq structure.
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct fc_bsg_job *job = NULL;
	unsigned long flags;
	int size = 0;
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
		    evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}
		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}
		list_for_each_entry(iocbq, &head, list) {
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						diag_cmd_data_free(phba,
						(struct lpfc_dmabufext *)
							dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
							(phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							))
							lpfc_in_buf_free(phba,
									dmabuf);
						else
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
					piocbq->iocb.un.rcvels.remoteID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);
		lpfc_bsg_event_unref(evt);
		job = evt->set_job;
		evt->set_job = NULL;
		if (job) {
			job->reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			job->reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			job->job_done(job);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if (evt_req_id == SLI_CT_ELX_LOOPBACK)
		return 0;
	return 1;
}
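
/*
 * Producer/consumer sketch (illustrative) of the handshake implemented
 * above: the unsolicited handler queues an event_data under ct_ev_lock
 * and wakes the waiter; loopback waiters such as lpfcdiag_loop_get_xri()
 * sleep on evt->wq until the data shows up on events_to_see.
 */
#if 0
	/* producer: lpfc_bsg_ct_unsol_event() */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt_dat->node, &evt->events_to_see);
	wake_up_interruptible(&evt->wq);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* consumer: loopback test path */
	wait_event_interruptible_timeout(evt->wq,
			!list_empty(&evt->events_to_see),
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
#endif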
/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (dd_data == NULL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2734 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_error;
	}
	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->type = TYPE_EVT;
	dd_data->context_un.evt = evt;
	evt->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}
	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't any more
	 */
	if (evt_dat == NULL) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;
	kfree(evt_dat->data);
	kfree(evt_dat);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	job->reply->result = 0;
	job->job_done(job);
	return 0;

job_error:
	job->dd_data = NULL;
	job->reply->result = rc;
	return rc;
}
/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	job = dd_data->context_un.iocb.set_job;
	bmp = dd_data->context_un.iocb.bmp;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the port's context exchange array.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *bmp, int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}
	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
					"2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->un.ulpWord[3] = ndlp->nlp_rpi;
		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;
	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context3 = bmp;

	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = NULL;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.rspiocbq = NULL;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}
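
/*
 * Tag lifecycle sketch (illustrative): on SLI-4 the unsolicited CT
 * handler parks the incoming exchange in phba->ct_ctx[] and reports the
 * array index to the application as immed_dat; SEND_MGMT_RESP hands
 * that index back as @tag, lpfc_issue_ct_rsp() looks up the saved
 * oxid/SID, and the entry is invalidated (UNSOL_VALID cleared) once the
 * response is issued.
 */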
/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL;
	struct scatterlist *sgel = NULL;
	int request_nseg;
	int numbde;
	dma_addr_t busaddr;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */
	/* TBD need to handle a timeout */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	rc = -EACCES;
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

send_mgmt_rsp_free_bmp:
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing a port into diagnostic loopback
 * mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete, then the link is brought down. If the link is
 * placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_bsg_diag_mode(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_set *loopback_mode;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
	uint32_t link_flags;
	uint32_t timeout;
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	int mbxstatus;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto job_error;
	}
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}

		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	while (pring->txcmplq_cnt) {
		if (i++ > 500)	/* wait up to 5 seconds */
			break;

		msleep(10);
	}

	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}

			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			phba->link_flag |= LS_LOOPBACK_MODE;
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}
	} else
		rc = -ENODEV;
loopback_mode_exit:
	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);
job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}
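
/*
 * Userspace sketch (illustrative only, not part of the driver) of how a
 * management application might reach the handler above through the
 * fc_host bsg node. The device path and the LPFC_BSG_VENDOR_DIAG_MODE
 * value (4) are assumptions taken from lpfc_bsg.h; the vendor payload
 * mirrors struct diag_mode_set {command, type, timeout}.
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>			/* SG_IO */
#include <linux/bsg.h>			/* struct sg_io_v4 */
#include <scsi/scsi_bsg_fc.h>		/* struct fc_bsg_request/reply */

static int issue_diag_mode(const char *bsg_path, uint32_t link_type,
			   uint32_t timeout)
{
	unsigned char buf[sizeof(struct fc_bsg_request) + 3 * sizeof(uint32_t)];
	struct fc_bsg_request *req = (struct fc_bsg_request *)buf;
	uint32_t *vcmd = (uint32_t *)req->rqst_data.h_vendor.vendor_cmd;
	struct fc_bsg_reply reply;
	struct sg_io_v4 sgio;
	int fd, rc;

	memset(buf, 0, sizeof(buf));
	req->msgcode = FC_BSG_HST_VENDOR;
	vcmd[0] = 4;			/* LPFC_BSG_VENDOR_DIAG_MODE (assumed) */
	vcmd[1] = link_type;		/* e.g. INTERNAL_LOOP_BACK */
	vcmd[2] = timeout;		/* seconds to wait for link states */

	memset(&sgio, 0, sizeof(sgio));
	sgio.guard = 'Q';
	sgio.protocol = BSG_PROTOCOL_SCSI;
	sgio.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	sgio.request_len = sizeof(buf);
	sgio.request = (uintptr_t)buf;
	sgio.max_response_len = sizeof(reply);
	sgio.response = (uintptr_t)&reply;

	fd = open(bsg_path, O_RDWR);	/* e.g. /dev/bsg/fc_host0 */
	if (fd < 0)
		return -1;
	rc = ioctl(fd, SG_IO, &sgio);
	close(fd);
	return rc ? rc : reply.result;
}
#endif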
/**
 * lpfcdiag_loop_self_reg - obtains a remote port login id
 * @phba: Pointer to HBA context object
 * @rpi: Pointer to a remote port login id
 *
 * This function obtains a remote port login id so the diag loopback test
 * can send and receive its own unsolicited CT command.
 **/
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *dmabuff;
	int status;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
			      (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
	if (status) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
	mbox->context1 = NULL;
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
		kfree(dmabuff);
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return -ENODEV;
	}

	*rpi = mbox->u.mb.un.varWords[0];

	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	kfree(dmabuff);
	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}
/**
 * lpfcdiag_loop_self_unreg - unregs from the rpi
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 *
 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
 **/
static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
{
	LPFC_MBOXQ_t *mbox;
	int status;

	/* Allocate mboxq structure */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox == NULL)
		return -ENOMEM;

	lpfc_unreg_login(phba, 0, rpi, mbox);
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}

	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}
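
/*
 * Mailbox ownership sketch (illustrative): both helpers above follow
 * the rule used throughout this file - when lpfc_sli_issue_mbox_wait()
 * returns MBX_TIMEOUT, the mailbox memory now belongs to the SLI
 * layer's eventual completion and must not be freed by the caller.
 */
#if 0
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
	if (status != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	/* on MBX_TIMEOUT: free nothing here; SLI layer releases mbox */
#endif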
/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to receive exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * flags are used so that the unsolicited response handler is able to process
 * the ct command sent on the same port.
 **/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
			 uint16_t *txxri, uint16_t *rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	unsigned long flags;

	*txxri = 0;
	*rxxri = 0;
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return -ENOMEM;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		INIT_LIST_HEAD(&dmabuf->list);
		bpl = (struct ulp_bde64 *) dmabuf->virt;
		memset(bpl, 0, sizeof(*bpl));
		ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
		bpl->addrHigh =
			le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
		bpl->addrLow =
			le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
		bpl->tus.f.bdeFlags = 0;
		bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL) {
		ret_val = -ENOMEM;
		goto err_get_xri_exit;
	}
	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);

	cmd->un.xseq64.w5.hcsw.Fctl = LA;
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = rpi;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
				rspiocbq,
				(phba->fc_ratov * 2)
				+ LPFC_DRVR_TIMEOUT);
	if (ret_val)
		goto err_get_xri_exit;

	*txxri = rsp->ulpContext;
	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	ret_val = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
	if (list_empty(&evt->events_to_see))
		ret_val = (ret_val) ? -EINTR : -ETIMEDOUT;
	else {
		ret_val = IOCB_SUCCESS;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}
/**
 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
 * @phba: Pointer to HBA context object
 * @bpl: Pointer to 64 bit bde structure
 * @size: Number of bytes to process
 * @nocopydata: Flag to skip copying user data into the allocated buffer
 *
 * This function allocates page size buffers and populates an lpfc_dmabufext.
 * If allowed the user data pointed to with indataptr is copied into the kernel
 * memory. The chained list of page size buffers is returned.
 **/
static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba *phba,
		   struct ulp_bde64 *bpl, uint32_t size,
		   int nocopydata)
{
	struct lpfc_dmabufext *mlist = NULL;
	struct lpfc_dmabufext *dmp;
	int cnt, offset = 0, i = 0;
	struct pci_dev *pcidev;

	pcidev = phba->pcidev;

	while (size) {
		/* We get chunks of 4K */
		if (size > BUF_SZ_4K)
			cnt = BUF_SZ_4K;
		else
			cnt = size;
		/* allocate struct lpfc_dmabufext buffer header */
		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
		if (!dmp)
			goto out;

		INIT_LIST_HEAD(&dmp->dma.list);

		/* Queue it to a linked list */
		if (mlist)
			list_add_tail(&dmp->dma.list, &mlist->dma.list);
		else
			mlist = dmp;

		/* allocate buffer */
		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
						   cnt,
						   &(dmp->dma.phys),
						   GFP_KERNEL);
		if (!dmp->dma.virt)
			goto out;

		dmp->size = cnt;

		if (nocopydata) {
			bpl->tus.f.bdeFlags = 0;
			pci_dma_sync_single_for_device(phba->pcidev,
				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);

		} else {
			memset((uint8_t *)dmp->dma.virt, 0, cnt);
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		}

		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
		bpl->tus.f.bdeSize = (ushort) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;

		i++;
		offset += cnt;
		size -= cnt;
	}

	mlist->flag = i;
	return mlist;
out:
	diag_cmd_data_free(phba, mlist);
	return NULL;
}
/**
 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to
 * receive an unsolicited CT command.
 **/
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
				     size_t len)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd = NULL;
	struct list_head head, *curr, *next;
	struct lpfc_dmabuf *rxbmp;
	struct lpfc_dmabuf *dmp;
	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
	struct ulp_bde64 *rxbpl = NULL;
	uint32_t num_bde;
	struct lpfc_dmabufext *rxbuffer = NULL;
	int ret_val = 0;
	int i = 0;
	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (rxbmp != NULL) {
		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		INIT_LIST_HEAD(&rxbmp->list);
		rxbpl = (struct ulp_bde64 *) rxbmp->virt;
		rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
	}

	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
		ret_val = -ENOMEM;
		goto err_post_rxbufs_exit;
	}
	/* Queue buffers for the receive exchange */
	num_bde = (uint32_t)rxbuffer->flag;
	dmp = &rxbuffer->dma;

	cmd = &cmdiocbq->iocb;
	i = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &dmp->list);
	list_for_each_safe(curr, next, &head) {
		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
		list_del(curr);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
			cmd->un.quexri64cx.buff.bde.addrHigh =
				putPaddrHigh(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.addrLow =
				putPaddrLow(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
			cmd->ulpCommand = CMD_QUE_XRI64_CX;
			cmd->ulpPU = 0;
			cmd->ulpLe = 1;
			cmd->ulpBdeCount = 1;
			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;

		} else {
			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
			cmd->un.cont64[i].tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->ulpBdeCount = ++i;

			if ((--num_bde > 0) && (i < 2))
				continue;

			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
			cmd->ulpLe = 1;
		}

		cmd->ulpClass = CLASS3;
		cmd->ulpContext = rxxri;
		ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

		if (ret_val == IOCB_ERROR) {
			diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[0]);
			if (mp[1])
				diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[1]);
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
		if (mp[1]) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
			mp[1] = NULL;
		}

		/* The iocb was freed by lpfc_sli_issue_iocb */
		cmdiocbq = lpfc_sli_get_iocbq(phba);
		if (!cmdiocbq) {
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		cmd = &cmdiocbq->iocb;
		i = 0;
	}
	list_del(&head);
err_post_rxbufs_exit:

	if (rxbmp) {
		if (rxbmp->virt)
			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}

	if (cmdiocbq)
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	return ret_val;
}
/**
 * lpfc_bsg_diag_test - with a port in loopback issues a CT cmd to itself
 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
 *
 * This function receives a user data buffer to be transmitted and received on
 * the same port, the link must be up and in loopback mode prior to being
 * called.
 *
 * 1. A kernel buffer is allocated to copy the user data into.
 * 2. The port registers with "itself".
 * 3. The transmit and receive exchange ids are obtained.
 * 4. The receive exchange id is posted.
 * 5. A new els loopback event is created.
 * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
 *
 * This function is meant to be called n times while the port is in loopback
 * so it is the app's responsibility to issue a reset to take the port out
 * of loopback mode.
 **/
static int
lpfc_bsg_diag_test(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_test *diag_mode;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;
	uint32_t full_size;
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf *curr;
	uint16_t txxri, rxxri;
	uint32_t num_bde;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	unsigned long flags;
	void *dataout = NULL;
	uint32_t total_mem;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if (job->request_payload.payload_len !=
	    job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	diag_mode = (struct diag_mode_test *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}
	if (size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
		 * then we allocate 64k and re-use that buffer over and over to
		 * xfer the whole block. This is because the Linux kernel has a
		 * problem allocating more than 120k of kernel space memory. Saw
		 * problem with GET_FCPTARGETMAPPING...
		 */
		if (size <= (64 * 1024))
			total_mem = size;
		else
			total_mem = 64 * 1024;
	} else
		/* Allocate memory for ioctl data */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ptr, size);

	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		goto loopback_test_exit;
	rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
	if (rc) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
	if (rc) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		INIT_LIST_HEAD(&txbmp->list);
		txbpl = (struct ulp_bde64 *) txbmp->virt;
		if (txbpl)
			txbuffer = diag_cmd_data_alloc(phba,
							txbpl, full_size, 0);
	}

	if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}
	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
			ptr + current_offset,
			segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);
	/* Build the XMIT_SEQUENCE iocb */

	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = txxri;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
				      (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
	if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
		rc = -EIO;
		goto err_loopback_test_exit;
	}

	evt->waiting = 1;
	rc = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
	evt->waiting = 0;
	if (list_empty(&evt->events_to_see))
		rc = (rc) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2248 rx_databuf = evdat->data;
2249 if (evdat->len != full_size) {
2250 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2251 "1603 Loopback test did not receive expected "
2252 "data length. actual length 0x%x expected "
2254 evdat->len, full_size);
2256 } else if (rx_databuf == NULL)
2260 /* skip over elx loopback header */
2261 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
2262 job->reply->reply_payload_rcv_len =
2263 sg_copy_from_buffer(job->reply_payload.sg_list,
2264 job->reply_payload.sg_cnt,
2266 job->reply->reply_payload_rcv_len = size;
err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (cmdiocbq != NULL)
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
 * @job: GET_DFC_REV fc_bsg_job
 **/
static int
lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_mgmt_rev *event_req;
	struct get_mgmt_rev_reply *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2740 Received GET_DFC_REV request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_mgmt_rev *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_mgmt_rev_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2741 Received GET_DFC_REV reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
	job->reply->result = rc;
	if (rc == 0)
		job->job_done(job);
	return rc;
}
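
/*
 * Example (editor's sketch, not driver code): a minimal userspace
 * invocation of the GET_MGMT_REV vendor command through the FC transport
 * bsg node.  The node name (e.g. /dev/bsg/fc_host0) and the vendor
 * command code (LPFC_BSG_VENDOR_GET_MGMT_REV, from lpfc_bsg.h) depend on
 * the local setup; error handling is omitted.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>		// SG_IO
 *	#include <linux/bsg.h>		// struct sg_io_v4
 *	#include <scsi/scsi_bsg_fc.h>	// struct fc_bsg_request/reply
 *
 *	unsigned char rqst[sizeof(struct fc_bsg_request) + 64];
 *	unsigned char rply[sizeof(struct fc_bsg_reply) + 64];
 *	struct fc_bsg_request *req = (struct fc_bsg_request *)rqst;
 *	struct sg_io_v4 io;
 *	int fd = open("/dev/bsg/fc_host0", O_RDWR);
 *
 *	memset(rqst, 0, sizeof(rqst));
 *	req->msgcode = FC_BSG_HST_VENDOR;
 *	req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;
 *
 *	memset(&io, 0, sizeof(io));
 *	io.guard = 'Q';
 *	io.protocol = BSG_PROTOCOL_SCSI;
 *	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	io.request = (uintptr_t)rqst;
 *	io.request_len = sizeof(rqst);
 *	io.response = (uintptr_t)rply;
 *	io.max_response_len = sizeof(rply);
 *	ioctl(fd, SG_IO, &io);
 *	// the major/minor revision comes back in the vendor reply data
 */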

/**
 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler for mailbox commands issued from the
 * lpfc_bsg_issue_mbox function. It is called by the mailbox event handler
 * with no lock held. It copies the completed mailbox back into the job's
 * reply payload, completes the waiting bsg job, and frees the tracking
 * resources.
 **/
void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	MAILBOX_t *pmb;
	MAILBOX_t *mb;
	struct fc_bsg_job *job;
	uint32_t size;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = pmboxq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
	mb = dd_data->context_un.mbox.mb;
	job = dd_data->context_un.mbox.set_job;
	memcpy(mb, pmb, sizeof(*pmb));
	size = job->request_payload.payload_len;
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				job->reply_payload.sg_cnt,
				mb, size);
	job->reply->result = 0;
	dd_data->context_un.mbox.set_job = NULL;
	job->dd_data = NULL;
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	kfree(mb);
	kfree(dd_data);
	return;
}
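
/*
 * Note: ct_ev_lock serializes this completion handler against
 * lpfc_bsg_timeout() below, which clears set_job under the same lock
 * when a job times out before the mailbox completes.
 */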

/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, and some may not be
 * issued from the application at all.
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
		break;
	case MBX_SET_VARIABLE:
	case MBX_RUN_BIU_DIAG64:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_SPARM64:
	case MBX_READ_LA:
	case MBX_READ_LA64:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}

/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job carrying the mailbox command.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object and mailbox command memory, get a mailbox
 * from the mailbox pool, and copy in the caller's mailbox command.
 *
 * If the port is offline or the SLI layer is not active (the port may be
 * being reset), poll for the command and complete the job inline;
 * otherwise issue the mailbox command and let the completion handler
 * finish the command.
 **/
static uint32_t
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
	struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *pmb;
	MAILBOX_t *mb;
	struct bsg_job_data *dd_data;
	uint32_t size;
	int rc = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		return -ENOMEM;
	}

	mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!mb) {
		kfree(dd_data);
		return -ENOMEM;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		kfree(dd_data);
		kfree(mb);
		return -ENOMEM;
	}

	size = job->request_payload.payload_len;
	job->reply->reply_payload_rcv_len =
		sg_copy_to_buffer(job->request_payload.sg_list,
				job->request_payload.sg_cnt,
				mb, size);

	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
	if (rc != 0) {
		kfree(dd_data);
		kfree(mb);
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return rc; /* must be negative */
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmb = &pmboxq->u.mb;
	memcpy(pmb, mb, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->context1 = NULL;
	pmboxq->vport = vport;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			if (rc != MBX_TIMEOUT) {
				kfree(dd_data);
				kfree(mb);
				mempool_free(pmboxq, phba->mbox_mem_pool);
			}
			return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
		}

		memcpy(mb, pmb, sizeof(*pmb));
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					job->reply_payload.sg_cnt,
					mb, size);
		kfree(dd_data);
		kfree(mb);
		mempool_free(pmboxq, phba->mbox_mem_pool);
		/* not waiting mbox already done */
		return 0;
	}
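
	/* Reaching here means the port is operational, so the command can be
	 * issued without blocking: the completion handler set up below
	 * finishes the job asynchronously.
	 */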
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
	/* setup context1 to pass the tracking structure to the wake function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = mb;
	dd_data->context_un.mbox.set_job = job;
	job->dd_data = dd_data;
	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		kfree(dd_data);
		kfree(mb);
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	return 1;
}
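
/*
 * Return convention used above: 0 means the mailbox completed inline
 * (polled path), 1 means it was submitted and will complete through
 * lpfc_bsg_wake_mbox_wait(), and a negative value is an error for the
 * bsg job.  lpfc_bsg_mbox_cmd() below relies on this.
 */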

/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
 **/
static int
lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2737 Received MBOX_REQ request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	if (job->request_payload.payload_len != PAGE_SIZE) {
		rc = -EINVAL;
		goto job_error;
	}

	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_error;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);

	if (rc == 0) {
		/* job done */
		job->reply->result = 0;
		job->dd_data = NULL;
		job->job_done(job);
	} else if (rc == 1)
		/* job submitted, will complete later */
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		job->reply->result = rc;
		job->dd_data = NULL;
	}

job_error:
	return rc;
}
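
/*
 * Editor's note: the MBOX vendor command expects exactly one page
 * (PAGE_SIZE) of request payload, with the raw MAILBOX_t command at its
 * start; anything else is rejected with -EINVAL above.  The completed
 * mailbox, including any data the firmware wrote back, is returned in the
 * reply payload by the polled path or by lpfc_bsg_wake_mbox_wait().
 */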

/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_TEST:
		rc = lpfc_bsg_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
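
/*
 * Editor's note: lpfc_bsg_request() above and lpfc_bsg_timeout() below
 * are the driver's bsg entry points; they are expected to be wired into
 * the FC transport through the driver's struct fc_function_template
 * (.bsg_request / .bsg_timeout), which lives elsewhere in the driver.
 */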

/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB.  The aborted IOCB will return
 * to the waiting function, which will handle passing the error back to
 * userspace.
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signalling the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return 0;
}