[SCSI] lpfc 8.3.8: (BSG4) Add new vendor specific BSG Commands
[safe/jmp/linux-2.6] / drivers / scsi / lpfc / lpfc_bsg.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2009-2010 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  *                                                                 *
8  * This program is free software; you can redistribute it and/or   *
9  * modify it under the terms of version 2 of the GNU General       *
10  * Public License as published by the Free Software Foundation.    *
11  * This program is distributed in the hope that it will be useful. *
12  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
13  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
14  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
15  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
17  * more details, a copy of which can be found in the file COPYING  *
18  * included with this package.                                     *
19  *******************************************************************/
20
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
24
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_host.h>
27 #include <scsi/scsi_transport_fc.h>
28 #include <scsi/scsi_bsg_fc.h>
29 #include <scsi/fc/fc_fs.h>
30
31 #include "lpfc_hw4.h"
32 #include "lpfc_hw.h"
33 #include "lpfc_sli.h"
34 #include "lpfc_sli4.h"
35 #include "lpfc_nl.h"
36 #include "lpfc_bsg.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_version.h"
44
/*
 * One registered waiter for unsolicited events.  Instances live on
 * phba->ct_ev_waiters and are refcounted via 'kref'; the final put
 * runs lpfc_bsg_event_free().
 */
struct lpfc_bsg_event {
	struct list_head node;		/* entry on phba->ct_ev_waiters */
	struct kref kref;		/* freed by lpfc_bsg_event_free() */
	wait_queue_head_t wq;		/* woken when an event arrives */

	/* Event type and waiter identifiers */
	uint32_t type_mask;		/* FC_REG_* event bits of interest */
	uint32_t req_id;		/* matched against incoming FsType */
	uint32_t reg_id;		/* registration id from the caller */

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;	/* jiffies when the event was created */
	int waiting;			/* nonzero while a thread sleeps on wq */

	/* seen and not seen events */
	struct list_head events_to_get;	/* event_data ready for userspace */
	struct list_head events_to_see;	/* event_data received, not claimed */

	/* job waiting for this event to finish */
	struct fc_bsg_job *set_job;
};
66
/*
 * Tracking data for a BSG-originated CT or ELS iocb: everything the
 * completion handler needs to tear the request down and reply.
 */
struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;	/* command iocb issued to the HBA */
	struct lpfc_iocbq *rspiocbq;	/* iocb the response is copied into */
	struct lpfc_dmabuf *bmp;	/* BPL buffer; NULL for ELS requests */
	struct lpfc_nodelist *ndlp;	/* node reference held for the i/o */

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};
76
/*
 * Tracking data for a BSG mailbox command.  (The mailbox submit and
 * completion paths are not in this part of the file -- presumably the
 * completion handler mirrors the iocb one; verify against the rest
 * of lpfc_bsg.c.)
 */
struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;	/* mailbox queue entry given to the HBA */
	MAILBOX_t *mb;		/* mailbox command/response buffer */

	/* job waiting for this mbox command to finish */
	struct fc_bsg_job *set_job;
};
84
/* Discriminator values for bsg_job_data.type */
#define TYPE_EVT        1
#define TYPE_IOCB       2
#define TYPE_MBOX       3
/*
 * Per-job tracking structure hung off iocb context1 (and the job's
 * dd_data).  'type' selects which member of 'context_un' is valid.
 */
struct bsg_job_data {
	uint32_t type;
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
	} context_un;
};
96
/*
 * One received unsolicited event, queued on an lpfc_bsg_event's
 * events_to_see/events_to_get lists; 'data' is freed with the event.
 */
struct event_data {
	struct list_head node;	/* entry on events_to_see/events_to_get */
	uint32_t type;
	uint32_t immed_dat;	/* immediate data word; source not shown here */
	void *data;		/* kzalloc'd payload of 'len' bytes */
	uint32_t len;		/* byte count of 'data' */
};
104
/* Segment size used when carving loopback DMA buffers */
#define BUF_SZ_4K 4096
/* Emulex vendor-specific CT FsType value for loopback frames */
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

/*
 * Bytes of CT request header that precede the command-specific payload.
 * Use offsetof() rather than the old NULL-pointer-dereference trick,
 * which has undefined behavior in C; offsetof yields the same value.
 */
#define ELX_LOOPBACK_HEADER_SZ \
	offsetof(struct lpfc_sli_ct_request, un)
115
/*
 * lpfc_dmabuf plus the coherent-allocation size needed to free it
 * later (see diag_cmd_data_free()).
 */
struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;	/* base buffer: virt, phys, list linkage */
	uint32_t size;		/* bytes to pass to dma_free_coherent() */
	uint32_t flag;
};
121
122 /**
123  * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
124  * @phba: Pointer to HBA context object.
125  * @cmdiocbq: Pointer to command iocb.
126  * @rspiocbq: Pointer to response iocb.
127  *
128  * This function is the completion handler for iocbs issued using
129  * lpfc_bsg_send_mgmt_cmd function. This function is called by the
130  * ring event handler function without any lock held. This function
131  * can be called from both worker thread context and interrupt
132  * context. This function also can be called from another thread which
133  * cleans up the SLI layer objects.
134  * This function copies the contents of the response iocb to the
135  * response iocb memory object provided by the caller of
136  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
137  * sleeps for the iocb completion.
138  **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	unsigned long iflags;	/* for the inner hbalock critical section */
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;

	/* ct_ev_lock is held for the whole teardown so the job timeout
	 * handler cannot complete the same job concurrently */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* a NULL context1 means the timeout path already replied */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	iocb = &dd_data->context_un.iocb;
	job = iocb->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	/* copy the response and mark the command awake under hbalock */
	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	bmp = iocb->bmp;
	/* from here on use the response iocb saved at submit time, not
	 * the ring's rspiocbq parameter */
	rspiocbq = iocb->rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = iocb->ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	/* map the firmware completion status to an errno */
	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	/* release everything taken by lpfc_bsg_send_mgmt_cmd() */
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
214
215 /**
216  * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
217  * @job: fc_bsg_job to handle
218  **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;	/* holds the BPL page */
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* hold the node across the i/o; dropped by the completion handler
	 * (or the cleanup path below on failure) */
	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_bmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	cmd = &cmdiocbq->iocb;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	/* build the BPL: request segments first, then reply segments */
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* fill in the GEN_REQUEST64 iocb describing the CT frame */
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;	/* twice the resource allocation tov */
	cmd->ulpTimeout = timeout;

	/* hook up the tracking structure and completion handler */
	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

	/* in polled FCP-ring mode, re-enable ring 0 interrupts so the
	 * completion can be delivered */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

	/* the labels below fall through: each undoes one allocation */
free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
384
385 /**
386  * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
387  * @phba: Pointer to HBA context object.
388  * @cmdiocbq: Pointer to command iocb.
389  * @rspiocbq: Pointer to response iocb.
390  *
391  * This function is the completion handler for iocbs issued using
392  * lpfc_bsg_rport_els function. This function is called by the
393  * ring event handler function without any lock held. This function
394  * can be called from both worker thread context and interrupt
395  * context. This function also can be called from other thread which
396  * cleans up the SLI layer objects.
397  * This function copies the contents of the response iocb to the
398  * response iocb memory object provided by the caller of
399  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
400  * sleeps for the iocb completion.
401  **/
402 static void
403 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
404                         struct lpfc_iocbq *cmdiocbq,
405                         struct lpfc_iocbq *rspiocbq)
406 {
407         struct bsg_job_data *dd_data;
408         struct fc_bsg_job *job;
409         IOCB_t *rsp;
410         struct lpfc_nodelist *ndlp;
411         struct lpfc_dmabuf *pbuflist = NULL;
412         struct fc_bsg_ctels_reply *els_reply;
413         uint8_t *rjt_data;
414         unsigned long flags;
415         int rc = 0;
416
417         spin_lock_irqsave(&phba->ct_ev_lock, flags);
418         dd_data = cmdiocbq->context1;
419         /* normal completion and timeout crossed paths, already done */
420         if (!dd_data) {
421                 spin_unlock_irqrestore(&phba->hbalock, flags);
422                 return;
423         }
424
425         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
426         if (cmdiocbq->context2 && rspiocbq)
427                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
428                        &rspiocbq->iocb, sizeof(IOCB_t));
429
430         job = dd_data->context_un.iocb.set_job;
431         cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
432         rspiocbq = dd_data->context_un.iocb.rspiocbq;
433         rsp = &rspiocbq->iocb;
434         ndlp = dd_data->context_un.iocb.ndlp;
435
436         pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
437                      job->request_payload.sg_cnt, DMA_TO_DEVICE);
438         pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
439                      job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
440
441         if (job->reply->result == -EAGAIN)
442                 rc = -EAGAIN;
443         else if (rsp->ulpStatus == IOSTAT_SUCCESS)
444                 job->reply->reply_payload_rcv_len =
445                         rsp->un.elsreq64.bdl.bdeSize;
446         else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
447                 job->reply->reply_payload_rcv_len =
448                         sizeof(struct fc_bsg_ctels_reply);
449                 /* LS_RJT data returned in word 4 */
450                 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
451                 els_reply = &job->reply->reply_data.ctels_reply;
452                 els_reply->status = FC_CTELS_STATUS_REJECT;
453                 els_reply->rjt_data.action = rjt_data[3];
454                 els_reply->rjt_data.reason_code = rjt_data[2];
455                 els_reply->rjt_data.reason_explanation = rjt_data[1];
456                 els_reply->rjt_data.vendor_unique = rjt_data[0];
457         } else
458                 rc = -EIO;
459
460         pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
461         lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
462         lpfc_sli_release_iocbq(phba, rspiocbq);
463         lpfc_sli_release_iocbq(phba, cmdiocbq);
464         lpfc_nlp_put(ndlp);
465         kfree(dd_data);
466         /* make error code available to userspace */
467         job->reply->result = rc;
468         job->dd_data = NULL;
469         /* complete the job back to userspace */
470         job->job_done(job);
471         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
472         return;
473 }
474
475 /**
476  * lpfc_bsg_rport_els - send an ELS command from a bsg request
477  * @job: fc_bsg_job to handle
478  **/
479 static int
480 lpfc_bsg_rport_els(struct fc_bsg_job *job)
481 {
482         struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
483         struct lpfc_hba *phba = vport->phba;
484         struct lpfc_rport_data *rdata = job->rport->dd_data;
485         struct lpfc_nodelist *ndlp = rdata->pnode;
486         uint32_t elscmd;
487         uint32_t cmdsize;
488         uint32_t rspsize;
489         struct lpfc_iocbq *rspiocbq;
490         struct lpfc_iocbq *cmdiocbq;
491         IOCB_t *rsp;
492         uint16_t rpi = 0;
493         struct lpfc_dmabuf *pcmd;
494         struct lpfc_dmabuf *prsp;
495         struct lpfc_dmabuf *pbuflist = NULL;
496         struct ulp_bde64 *bpl;
497         int request_nseg;
498         int reply_nseg;
499         struct scatterlist *sgel = NULL;
500         int numbde;
501         dma_addr_t busaddr;
502         struct bsg_job_data *dd_data;
503         uint32_t creg_val;
504         int rc = 0;
505
506         /* in case no data is transferred */
507         job->reply->reply_payload_rcv_len = 0;
508
509         /* allocate our bsg tracking structure */
510         dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
511         if (!dd_data) {
512                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
513                                 "2735 Failed allocation of dd_data\n");
514                 rc = -ENOMEM;
515                 goto no_dd_data;
516         }
517
518         if (!lpfc_nlp_get(ndlp)) {
519                 rc = -ENODEV;
520                 goto free_dd_data;
521         }
522
523         elscmd = job->request->rqst_data.r_els.els_code;
524         cmdsize = job->request_payload.payload_len;
525         rspsize = job->reply_payload.payload_len;
526         rspiocbq = lpfc_sli_get_iocbq(phba);
527         if (!rspiocbq) {
528                 lpfc_nlp_put(ndlp);
529                 rc = -ENOMEM;
530                 goto free_dd_data;
531         }
532
533         rsp = &rspiocbq->iocb;
534         rpi = ndlp->nlp_rpi;
535
536         cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
537                                       ndlp->nlp_DID, elscmd);
538         if (!cmdiocbq) {
539                 rc = -EIO;
540                 goto free_rspiocbq;
541         }
542
543         /* prep els iocb set context1 to the ndlp, context2 to the command
544          * dmabuf, context3 holds the data dmabuf
545          */
546         pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
547         prsp = (struct lpfc_dmabuf *) pcmd->list.next;
548         lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
549         kfree(pcmd);
550         lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
551         kfree(prsp);
552         cmdiocbq->context2 = NULL;
553
554         pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
555         bpl = (struct ulp_bde64 *) pbuflist->virt;
556
557         request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
558                                   job->request_payload.sg_cnt, DMA_TO_DEVICE);
559         for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
560                 busaddr = sg_dma_address(sgel);
561                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
562                 bpl->tus.f.bdeSize = sg_dma_len(sgel);
563                 bpl->tus.w = cpu_to_le32(bpl->tus.w);
564                 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
565                 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
566                 bpl++;
567         }
568
569         reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
570                                 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
571         for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
572                 busaddr = sg_dma_address(sgel);
573                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
574                 bpl->tus.f.bdeSize = sg_dma_len(sgel);
575                 bpl->tus.w = cpu_to_le32(bpl->tus.w);
576                 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
577                 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
578                 bpl++;
579         }
580         cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
581                 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
582         cmdiocbq->iocb.ulpContext = rpi;
583         cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
584         cmdiocbq->context1 = NULL;
585         cmdiocbq->context2 = NULL;
586
587         cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
588         cmdiocbq->context1 = dd_data;
589         cmdiocbq->context2 = rspiocbq;
590         dd_data->type = TYPE_IOCB;
591         dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
592         dd_data->context_un.iocb.rspiocbq = rspiocbq;
593         dd_data->context_un.iocb.set_job = job;
594         dd_data->context_un.iocb.bmp = NULL;;
595         dd_data->context_un.iocb.ndlp = ndlp;
596
597         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
598                 creg_val = readl(phba->HCregaddr);
599                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
600                 writel(creg_val, phba->HCregaddr);
601                 readl(phba->HCregaddr); /* flush */
602         }
603         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
604         lpfc_nlp_put(ndlp);
605         if (rc == IOCB_SUCCESS)
606                 return 0; /* done for now */
607
608         pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
609                      job->request_payload.sg_cnt, DMA_TO_DEVICE);
610         pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
611                      job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
612
613         lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
614
615         lpfc_sli_release_iocbq(phba, cmdiocbq);
616
617 free_rspiocbq:
618         lpfc_sli_release_iocbq(phba, rspiocbq);
619
620 free_dd_data:
621         kfree(dd_data);
622
623 no_dd_data:
624         /* make error code available to userspace */
625         job->reply->result = rc;
626         job->dd_data = NULL;
627         return rc;
628 }
629
630 /**
631  * lpfc_bsg_event_free - frees an allocated event structure
632  * @kref: Pointer to a kref.
633  *
634  * Called from kref_put. Back cast the kref into an event structure address.
635  * Free any events to get, delete associated nodes, free any events to see,
636  * free any data then free the event itself.
637  **/
638 static void
639 lpfc_bsg_event_free(struct kref *kref)
640 {
641         struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
642                                                   kref);
643         struct event_data *ed;
644
645         list_del(&evt->node);
646
647         while (!list_empty(&evt->events_to_get)) {
648                 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
649                 list_del(&ed->node);
650                 kfree(ed->data);
651                 kfree(ed);
652         }
653
654         while (!list_empty(&evt->events_to_see)) {
655                 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
656                 list_del(&ed->node);
657                 kfree(ed->data);
658                 kfree(ed);
659         }
660
661         kfree(evt);
662 }
663
664 /**
665  * lpfc_bsg_event_ref - increments the kref for an event
666  * @evt: Pointer to an event structure.
667  **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);	/* paired with lpfc_bsg_event_unref() */
}
673
674 /**
675  * lpfc_bsg_event_unref - Uses kref_put to free an event structure
676  * @evt: Pointer to an event structure.
677  **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	/* final put frees the event and everything queued on it */
	kref_put(&evt->kref, lpfc_bsg_event_free);
}
683
684 /**
685  * lpfc_bsg_event_new - allocate and initialize a event structure
686  * @ev_mask: Mask of events.
687  * @ev_reg_id: Event reg id.
688  * @ev_req_id: Event request id.
689  **/
690 static struct lpfc_bsg_event *
691 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
692 {
693         struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
694
695         if (!evt)
696                 return NULL;
697
698         INIT_LIST_HEAD(&evt->events_to_get);
699         INIT_LIST_HEAD(&evt->events_to_see);
700         evt->type_mask = ev_mask;
701         evt->req_id = ev_req_id;
702         evt->reg_id = ev_reg_id;
703         evt->wait_time_stamp = jiffies;
704         init_waitqueue_head(&evt->wq);
705         kref_init(&evt->kref);
706         return evt;
707 }
708
709 /**
710  * diag_cmd_data_free - Frees an lpfc dma buffer extension
711  * @phba: Pointer to HBA context object.
712  * @mlist: Pointer to an lpfc dma buffer extension.
713  **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	/* Nothing to free, or the link is down while loopback mode is set.
	 * NOTE(review): with '&&', buffers ARE freed when the link is down
	 * but loopback is NOT set -- confirm this is intended rather than
	 * a typo for '||'.
	 */
	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	/* splice a local head onto the chain so the first element
	 * (mlist itself) is visited by the iteration below */
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}
740
741 /**
742  * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
743  * @phba:
744  * @pring:
745  * @piocbq:
746  *
747  * This function is called when an unsolicited CT command is received.  It
748  * forwards the event to any processes registered to receive CT events.
749  **/
750 int
751 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
752                         struct lpfc_iocbq *piocbq)
753 {
754         uint32_t evt_req_id = 0;
755         uint32_t cmd;
756         uint32_t len;
757         struct lpfc_dmabuf *dmabuf = NULL;
758         struct lpfc_bsg_event *evt;
759         struct event_data *evt_dat = NULL;
760         struct lpfc_iocbq *iocbq;
761         size_t offset = 0;
762         struct list_head head;
763         struct ulp_bde64 *bde;
764         dma_addr_t dma_addr;
765         int i;
766         struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
767         struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
768         struct lpfc_hbq_entry *hbqe;
769         struct lpfc_sli_ct_request *ct_req;
770         struct fc_bsg_job *job = NULL;
771         unsigned long flags;
772         int size = 0;
773
774         INIT_LIST_HEAD(&head);
775         list_add_tail(&head, &piocbq->list);
776
777         if (piocbq->iocb.ulpBdeCount == 0 ||
778             piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
779                 goto error_ct_unsol_exit;
780
781         if (phba->link_state == LPFC_HBA_ERROR ||
782                 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
783                 goto error_ct_unsol_exit;
784
785         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
786                 dmabuf = bdeBuf1;
787         else {
788                 dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
789                                     piocbq->iocb.un.cont64[0].addrLow);
790                 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
791         }
792         if (dmabuf == NULL)
793                 goto error_ct_unsol_exit;
794         ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
795         evt_req_id = ct_req->FsType;
796         cmd = ct_req->CommandResponse.bits.CmdRsp;
797         len = ct_req->CommandResponse.bits.Size;
798         if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
799                 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
800
801         spin_lock_irqsave(&phba->ct_ev_lock, flags);
802         list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
803                 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
804                         evt->req_id != evt_req_id)
805                         continue;
806
807                 lpfc_bsg_event_ref(evt);
808                 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
809                 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
810                 if (evt_dat == NULL) {
811                         spin_lock_irqsave(&phba->ct_ev_lock, flags);
812                         lpfc_bsg_event_unref(evt);
813                         lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
814                                         "2614 Memory allocation failed for "
815                                         "CT event\n");
816                         break;
817                 }
818
819                 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
820                         /* take accumulated byte count from the last iocbq */
821                         iocbq = list_entry(head.prev, typeof(*iocbq), list);
822                         evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
823                 } else {
824                         list_for_each_entry(iocbq, &head, list) {
825                                 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
826                                         evt_dat->len +=
827                                         iocbq->iocb.un.cont64[i].tus.f.bdeSize;
828                         }
829                 }
830
831                 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
832                 if (evt_dat->data == NULL) {
833                         lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
834                                         "2615 Memory allocation failed for "
835                                         "CT event data, size %d\n",
836                                         evt_dat->len);
837                         kfree(evt_dat);
838                         spin_lock_irqsave(&phba->ct_ev_lock, flags);
839                         lpfc_bsg_event_unref(evt);
840                         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
841                         goto error_ct_unsol_exit;
842                 }
843
844                 list_for_each_entry(iocbq, &head, list) {
845                         size = 0;
846                         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
847                                 bdeBuf1 = iocbq->context2;
848                                 bdeBuf2 = iocbq->context3;
849                         }
850                         for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
851                                 if (phba->sli3_options &
852                                     LPFC_SLI3_HBQ_ENABLED) {
853                                         if (i == 0) {
854                                                 hbqe = (struct lpfc_hbq_entry *)
855                                                   &iocbq->iocb.un.ulpWord[0];
856                                                 size = hbqe->bde.tus.f.bdeSize;
857                                                 dmabuf = bdeBuf1;
858                                         } else if (i == 1) {
859                                                 hbqe = (struct lpfc_hbq_entry *)
860                                                         &iocbq->iocb.unsli3.
861                                                         sli3Words[4];
862                                                 size = hbqe->bde.tus.f.bdeSize;
863                                                 dmabuf = bdeBuf2;
864                                         }
865                                         if ((offset + size) > evt_dat->len)
866                                                 size = evt_dat->len - offset;
867                                 } else {
868                                         size = iocbq->iocb.un.cont64[i].
869                                                 tus.f.bdeSize;
870                                         bde = &iocbq->iocb.un.cont64[i];
871                                         dma_addr = getPaddr(bde->addrHigh,
872                                                             bde->addrLow);
873                                         dmabuf = lpfc_sli_ringpostbuf_get(phba,
874                                                         pring, dma_addr);
875                                 }
876                                 if (!dmabuf) {
877                                         lpfc_printf_log(phba, KERN_ERR,
878                                                 LOG_LIBDFC, "2616 No dmabuf "
879                                                 "found for iocbq 0x%p\n",
880                                                 iocbq);
881                                         kfree(evt_dat->data);
882                                         kfree(evt_dat);
883                                         spin_lock_irqsave(&phba->ct_ev_lock,
884                                                 flags);
885                                         lpfc_bsg_event_unref(evt);
886                                         spin_unlock_irqrestore(
887                                                 &phba->ct_ev_lock, flags);
888                                         goto error_ct_unsol_exit;
889                                 }
890                                 memcpy((char *)(evt_dat->data) + offset,
891                                        dmabuf->virt, size);
892                                 offset += size;
893                                 if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
894                                     !(phba->sli3_options &
895                                       LPFC_SLI3_HBQ_ENABLED)) {
896                                         lpfc_sli_ringpostbuf_put(phba, pring,
897                                                                  dmabuf);
898                                 } else {
899                                         switch (cmd) {
900                                         case ELX_LOOPBACK_DATA:
901                                                 diag_cmd_data_free(phba,
902                                                 (struct lpfc_dmabufext *)
903                                                         dmabuf);
904                                                 break;
905                                         case ELX_LOOPBACK_XRI_SETUP:
906                                                 if ((phba->sli_rev ==
907                                                         LPFC_SLI_REV2) ||
908                                                         (phba->sli3_options &
909                                                         LPFC_SLI3_HBQ_ENABLED
910                                                         )) {
911                                                         lpfc_in_buf_free(phba,
912                                                                         dmabuf);
913                                                 } else {
914                                                         lpfc_post_buffer(phba,
915                                                                          pring,
916                                                                          1);
917                                                 }
918                                                 break;
919                                         default:
920                                                 if (!(phba->sli3_options &
921                                                       LPFC_SLI3_HBQ_ENABLED))
922                                                         lpfc_post_buffer(phba,
923                                                                          pring,
924                                                                          1);
925                                                 break;
926                                         }
927                                 }
928                         }
929                 }
930
931                 spin_lock_irqsave(&phba->ct_ev_lock, flags);
932                 if (phba->sli_rev == LPFC_SLI_REV4) {
933                         evt_dat->immed_dat = phba->ctx_idx;
934                         phba->ctx_idx = (phba->ctx_idx + 1) % 64;
935                         phba->ct_ctx[evt_dat->immed_dat].oxid =
936                                                 piocbq->iocb.ulpContext;
937                         phba->ct_ctx[evt_dat->immed_dat].SID =
938                                 piocbq->iocb.un.rcvels.remoteID;
939                 } else
940                         evt_dat->immed_dat = piocbq->iocb.ulpContext;
941
942                 evt_dat->type = FC_REG_CT_EVENT;
943                 list_add(&evt_dat->node, &evt->events_to_see);
944                 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
945                         wake_up_interruptible(&evt->wq);
946                         lpfc_bsg_event_unref(evt);
947                         break;
948                 }
949
950                 list_move(evt->events_to_see.prev, &evt->events_to_get);
951                 lpfc_bsg_event_unref(evt);
952
953                 job = evt->set_job;
954                 evt->set_job = NULL;
955                 if (job) {
956                         job->reply->reply_payload_rcv_len = size;
957                         /* make error code available to userspace */
958                         job->reply->result = 0;
959                         job->dd_data = NULL;
960                         /* complete the job back to userspace */
961                         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
962                         job->job_done(job);
963                         spin_lock_irqsave(&phba->ct_ev_lock, flags);
964                 }
965         }
966         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
967
968 error_ct_unsol_exit:
969         if (!list_empty(&head))
970                 list_del(&head);
971         if (evt_req_id == SLI_CT_ELX_LOOPBACK)
972                 return 0;
973         return 1;
974 }
975
976 /**
977  * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
978  * @job: SET_EVENT fc_bsg_job
979  **/
980 static int
981 lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
982 {
983         struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
984         struct lpfc_hba *phba = vport->phba;
985         struct set_ct_event *event_req;
986         struct lpfc_bsg_event *evt;
987         int rc = 0;
988         struct bsg_job_data *dd_data = NULL;
989         uint32_t ev_mask;
990         unsigned long flags;
991
992         if (job->request_len <
993             sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
994                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
995                                 "2612 Received SET_CT_EVENT below minimum "
996                                 "size\n");
997                 rc = -EINVAL;
998                 goto job_error;
999         }
1000
1001         dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1002         if (dd_data == NULL) {
1003                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1004                                 "2734 Failed allocation of dd_data\n");
1005                 rc = -ENOMEM;
1006                 goto job_error;
1007         }
1008
1009         event_req = (struct set_ct_event *)
1010                 job->request->rqst_data.h_vendor.vendor_cmd;
1011         ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
1012                                 FC_REG_EVENT_MASK);
1013         spin_lock_irqsave(&phba->ct_ev_lock, flags);
1014         list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1015                 if (evt->reg_id == event_req->ev_reg_id) {
1016                         lpfc_bsg_event_ref(evt);
1017                         evt->wait_time_stamp = jiffies;
1018                         break;
1019                 }
1020         }
1021         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1022
1023         if (&evt->node == &phba->ct_ev_waiters) {
1024                 /* no event waiting struct yet - first call */
1025                 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1026                                         event_req->ev_req_id);
1027                 if (!evt) {
1028                         lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1029                                         "2617 Failed allocation of event "
1030                                         "waiter\n");
1031                         rc = -ENOMEM;
1032                         goto job_error;
1033                 }
1034
1035                 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1036                 list_add(&evt->node, &phba->ct_ev_waiters);
1037                 lpfc_bsg_event_ref(evt);
1038                 evt->wait_time_stamp = jiffies;
1039                 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1040         }
1041
1042         spin_lock_irqsave(&phba->ct_ev_lock, flags);
1043         evt->waiting = 1;
1044         dd_data->type = TYPE_EVT;
1045         dd_data->context_un.evt = evt;
1046         evt->set_job = job; /* for unsolicited command */
1047         job->dd_data = dd_data; /* for fc transport timeout callback*/
1048         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1049         return 0; /* call job done later */
1050
1051 job_error:
1052         if (dd_data != NULL)
1053                 kfree(dd_data);
1054
1055         job->dd_data = NULL;
1056         return rc;
1057 }
1058
1059 /**
1060  * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1061  * @job: GET_EVENT fc_bsg_job
1062  **/
1063 static int
1064 lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1065 {
1066         struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1067         struct lpfc_hba *phba = vport->phba;
1068         struct get_ct_event *event_req;
1069         struct get_ct_event_reply *event_reply;
1070         struct lpfc_bsg_event *evt;
1071         struct event_data *evt_dat = NULL;
1072         unsigned long flags;
1073         uint32_t rc = 0;
1074
1075         if (job->request_len <
1076             sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1077                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1078                                 "2613 Received GET_CT_EVENT request below "
1079                                 "minimum size\n");
1080                 rc = -EINVAL;
1081                 goto job_error;
1082         }
1083
1084         event_req = (struct get_ct_event *)
1085                 job->request->rqst_data.h_vendor.vendor_cmd;
1086
1087         event_reply = (struct get_ct_event_reply *)
1088                 job->reply->reply_data.vendor_reply.vendor_rsp;
1089         spin_lock_irqsave(&phba->ct_ev_lock, flags);
1090         list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1091                 if (evt->reg_id == event_req->ev_reg_id) {
1092                         if (list_empty(&evt->events_to_get))
1093                                 break;
1094                         lpfc_bsg_event_ref(evt);
1095                         evt->wait_time_stamp = jiffies;
1096                         evt_dat = list_entry(evt->events_to_get.prev,
1097                                              struct event_data, node);
1098                         list_del(&evt_dat->node);
1099                         break;
1100                 }
1101         }
1102         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1103
1104         /* The app may continue to ask for event data until it gets
1105          * an error indicating that there isn't anymore
1106          */
1107         if (evt_dat == NULL) {
1108                 job->reply->reply_payload_rcv_len = 0;
1109                 rc = -ENOENT;
1110                 goto job_error;
1111         }
1112
1113         if (evt_dat->len > job->request_payload.payload_len) {
1114                 evt_dat->len = job->request_payload.payload_len;
1115                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1116                                 "2618 Truncated event data at %d "
1117                                 "bytes\n",
1118                                 job->request_payload.payload_len);
1119         }
1120
1121         event_reply->type = evt_dat->type;
1122         event_reply->immed_data = evt_dat->immed_dat;
1123         if (evt_dat->len > 0)
1124                 job->reply->reply_payload_rcv_len =
1125                         sg_copy_from_buffer(job->request_payload.sg_list,
1126                                             job->request_payload.sg_cnt,
1127                                             evt_dat->data, evt_dat->len);
1128         else
1129                 job->reply->reply_payload_rcv_len = 0;
1130
1131         if (evt_dat) {
1132                 kfree(evt_dat->data);
1133                 kfree(evt_dat);
1134         }
1135
1136         spin_lock_irqsave(&phba->ct_ev_lock, flags);
1137         lpfc_bsg_event_unref(evt);
1138         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1139         job->dd_data = NULL;
1140         job->reply->result = 0;
1141         job->job_done(job);
1142         return 0;
1143
1144 job_error:
1145         job->dd_data = NULL;
1146         job->reply->result = rc;
1147         return rc;
1148 }
1149
1150 /**
1151  * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1152  * @phba: Pointer to HBA context object.
1153  * @cmdiocbq: Pointer to command iocb.
1154  * @rspiocbq: Pointer to response iocb.
1155  *
1156  * This function is the completion handler for iocbs issued using
1157  * lpfc_issue_ct_rsp_cmp function. This function is called by the
1158  * ring event handler function without any lock held. This function
1159  * can be called from both worker thread context and interrupt
1160  * context. This function also can be called from other thread which
1161  * cleans up the SLI layer objects.
1162  * This function copy the contents of the response iocb to the
1163  * response iocb memory object provided by the caller of
1164  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1165  * sleeps for the iocb completion.
1166  **/
1167 static void
1168 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1169                         struct lpfc_iocbq *cmdiocbq,
1170                         struct lpfc_iocbq *rspiocbq)
1171 {
1172         struct bsg_job_data *dd_data;
1173         struct fc_bsg_job *job;
1174         IOCB_t *rsp;
1175         struct lpfc_dmabuf *bmp;
1176         struct lpfc_nodelist *ndlp;
1177         unsigned long flags;
1178         int rc = 0;
1179
1180         spin_lock_irqsave(&phba->ct_ev_lock, flags);
1181         dd_data = cmdiocbq->context1;
1182         /* normal completion and timeout crossed paths, already done */
1183         if (!dd_data) {
1184                 spin_unlock_irqrestore(&phba->hbalock, flags);
1185                 return;
1186         }
1187
1188         job = dd_data->context_un.iocb.set_job;
1189         bmp = dd_data->context_un.iocb.bmp;
1190         rsp = &rspiocbq->iocb;
1191         ndlp = dd_data->context_un.iocb.ndlp;
1192
1193         pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1194                      job->request_payload.sg_cnt, DMA_TO_DEVICE);
1195
1196         if (rsp->ulpStatus) {
1197                 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1198                         switch (rsp->un.ulpWord[4] & 0xff) {
1199                         case IOERR_SEQUENCE_TIMEOUT:
1200                                 rc = -ETIMEDOUT;
1201                                 break;
1202                         case IOERR_INVALID_RPI:
1203                                 rc = -EFAULT;
1204                                 break;
1205                         default:
1206                                 rc = -EACCES;
1207                                 break;
1208                         }
1209                 } else
1210                         rc = -EACCES;
1211         } else
1212                 job->reply->reply_payload_rcv_len =
1213                         rsp->un.genreq64.bdl.bdeSize;
1214
1215         lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1216         lpfc_sli_release_iocbq(phba, cmdiocbq);
1217         lpfc_nlp_put(ndlp);
1218         kfree(bmp);
1219         kfree(dd_data);
1220         /* make error code available to userspace */
1221         job->reply->result = rc;
1222         job->dd_data = NULL;
1223         /* complete the job back to userspace */
1224         job->job_done(job);
1225         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1226         return;
1227 }
1228
1229 /**
1230  * lpfc_issue_ct_rsp - issue a ct response
1231  * @phba: Pointer to HBA context object.
1232  * @job: Pointer to the job object.
1233  * @tag: tag index value into the ports context exchange array.
1234  * @bmp: Pointer to a dma buffer descriptor.
1235  * @num_entry: Number of enties in the bde.
1236  **/
1237 static int
1238 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1239                   struct lpfc_dmabuf *bmp, int num_entry)
1240 {
1241         IOCB_t *icmd;
1242         struct lpfc_iocbq *ctiocb = NULL;
1243         int rc = 0;
1244         struct lpfc_nodelist *ndlp = NULL;
1245         struct bsg_job_data *dd_data;
1246         uint32_t creg_val;
1247
1248         /* allocate our bsg tracking structure */
1249         dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1250         if (!dd_data) {
1251                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1252                                 "2736 Failed allocation of dd_data\n");
1253                 rc = -ENOMEM;
1254                 goto no_dd_data;
1255         }
1256
1257         /* Allocate buffer for  command iocb */
1258         ctiocb = lpfc_sli_get_iocbq(phba);
1259         if (!ctiocb) {
1260                 rc = ENOMEM;
1261                 goto no_ctiocb;
1262         }
1263
1264         icmd = &ctiocb->iocb;
1265         icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1266         icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1267         icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1268         icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1269         icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1270         icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1271         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1272         icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1273         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1274
1275         /* Fill in rest of iocb */
1276         icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1277         icmd->ulpBdeCount = 1;
1278         icmd->ulpLe = 1;
1279         icmd->ulpClass = CLASS3;
1280         if (phba->sli_rev == LPFC_SLI_REV4) {
1281                 /* Do not issue unsol response if oxid not marked as valid */
1282                 if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
1283                         rc = IOCB_ERROR;
1284                         goto issue_ct_rsp_exit;
1285                 }
1286                 icmd->ulpContext = phba->ct_ctx[tag].oxid;
1287                 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1288                 if (!ndlp) {
1289                         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1290                                  "2721 ndlp null for oxid %x SID %x\n",
1291                                         icmd->ulpContext,
1292                                         phba->ct_ctx[tag].SID);
1293                         rc = IOCB_ERROR;
1294                         goto issue_ct_rsp_exit;
1295                 }
1296                 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1297                 /* The exchange is done, mark the entry as invalid */
1298                 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1299         } else
1300                 icmd->ulpContext = (ushort) tag;
1301
1302         icmd->ulpTimeout = phba->fc_ratov * 2;
1303
1304         /* Xmit CT response on exchange <xid> */
1305         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1306                         "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
1307                         icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
1308
1309         ctiocb->iocb_cmpl = NULL;
1310         ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1311         ctiocb->vport = phba->pport;
1312         ctiocb->context3 = bmp;
1313
1314         ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1315         ctiocb->context1 = dd_data;
1316         ctiocb->context2 = NULL;
1317         dd_data->type = TYPE_IOCB;
1318         dd_data->context_un.iocb.cmdiocbq = ctiocb;
1319         dd_data->context_un.iocb.rspiocbq = NULL;
1320         dd_data->context_un.iocb.set_job = job;
1321         dd_data->context_un.iocb.bmp = bmp;
1322         dd_data->context_un.iocb.ndlp = ndlp;
1323
1324         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1325                 creg_val = readl(phba->HCregaddr);
1326                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1327                 writel(creg_val, phba->HCregaddr);
1328                 readl(phba->HCregaddr); /* flush */
1329         }
1330
1331         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1332
1333         if (rc == IOCB_SUCCESS)
1334                 return 0; /* done for now */
1335
1336 issue_ct_rsp_exit:
1337         lpfc_sli_release_iocbq(phba, ctiocb);
1338 no_ctiocb:
1339         kfree(dd_data);
1340 no_dd_data:
1341         return rc;
1342 }
1343
/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 *
 * Maps the job's request payload for DMA, builds a BDE list describing it,
 * and hands it to lpfc_issue_ct_rsp() to transmit as a CT response on the
 * exchange identified by the request's tag.  On success the job is
 * completed asynchronously by the iocb completion handler, which also
 * unmaps the payload and frees @bmp.
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL;
	struct scatterlist *sgel = NULL;
	int request_nseg;
	int numbde;
	dma_addr_t busaddr;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* reject empty payloads and those larger than 80 x 4K buffers */
	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	/* dma buffer descriptor that will hold the BDE list */
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	/* map the payload s/g list and build one BDE per mapped segment */
	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);

	/* on success the completion handler owns the mapping and bmp */
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* TBD need to handle a timeout */
	/* issue failed: undo the DMA mapping and release the mbuf here */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
			  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	rc = -EACCES;
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

send_mgmt_rsp_free_bmp:
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
1419
1420 /**
1421  * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
1422  * @job: LPFC_BSG_VENDOR_DIAG_MODE
1423  *
1424  * This function is responsible for placing a port into diagnostic loopback
1425  * mode in order to perform a diagnostic loopback test.
1426  * All new scsi requests are blocked, a small delay is used to allow the
1427  * scsi requests to complete then the link is brought down. If the link is
1428  * is placed in loopback mode then scsi requests are again allowed
1429  * so the scsi mid-layer doesn't give up on the port.
1430  * All of this is done in-line.
1431  */
1432 static int
1433 lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1434 {
1435         struct Scsi_Host *shost = job->shost;
1436         struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1437         struct lpfc_hba *phba = vport->phba;
1438         struct diag_mode_set *loopback_mode;
1439         struct lpfc_sli *psli = &phba->sli;
1440         struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1441         uint32_t link_flags;
1442         uint32_t timeout;
1443         struct lpfc_vport **vports;
1444         LPFC_MBOXQ_t *pmboxq;
1445         int mbxstatus;
1446         int i = 0;
1447         int rc = 0;
1448
1449         /* no data to return just the return code */
1450         job->reply->reply_payload_rcv_len = 0;
1451
1452         if (job->request_len <
1453             sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
1454                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1455                                 "2738 Received DIAG MODE request below minimum "
1456                                 "size\n");
1457                 rc = -EINVAL;
1458                 goto job_error;
1459         }
1460
1461         loopback_mode = (struct diag_mode_set *)
1462                 job->request->rqst_data.h_vendor.vendor_cmd;
1463         link_flags = loopback_mode->type;
1464         timeout = loopback_mode->timeout;
1465
1466         if ((phba->link_state == LPFC_HBA_ERROR) ||
1467             (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1468             (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
1469                 rc = -EACCES;
1470                 goto job_error;
1471         }
1472
1473         pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1474         if (!pmboxq) {
1475                 rc = -ENOMEM;
1476                 goto job_error;
1477         }
1478
1479         vports = lpfc_create_vport_work_array(phba);
1480         if (vports) {
1481                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1482                         shost = lpfc_shost_from_vport(vports[i]);
1483                         scsi_block_requests(shost);
1484                 }
1485
1486                 lpfc_destroy_vport_work_array(phba, vports);
1487         } else {
1488                 shost = lpfc_shost_from_vport(phba->pport);
1489                 scsi_block_requests(shost);
1490         }
1491
1492         while (pring->txcmplq_cnt) {
1493                 if (i++ > 500)  /* wait up to 5 seconds */
1494                         break;
1495
1496                 msleep(10);
1497         }
1498
1499         memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1500         pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1501         pmboxq->u.mb.mbxOwner = OWN_HOST;
1502
1503         mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1504
1505         if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1506                 /* wait for link down before proceeding */
1507                 i = 0;
1508                 while (phba->link_state != LPFC_LINK_DOWN) {
1509                         if (i++ > timeout) {
1510                                 rc = -ETIMEDOUT;
1511                                 goto loopback_mode_exit;
1512                         }
1513
1514                         msleep(10);
1515                 }
1516
1517                 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1518                 if (link_flags == INTERNAL_LOOP_BACK)
1519                         pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1520                 else
1521                         pmboxq->u.mb.un.varInitLnk.link_flags =
1522                                 FLAGS_TOPOLOGY_MODE_LOOP;
1523
1524                 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1525                 pmboxq->u.mb.mbxOwner = OWN_HOST;
1526
1527                 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1528                                                      LPFC_MBOX_TMO);
1529
1530                 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1531                         rc = -ENODEV;
1532                 else {
1533                         phba->link_flag |= LS_LOOPBACK_MODE;
1534                         /* wait for the link attention interrupt */
1535                         msleep(100);
1536
1537                         i = 0;
1538                         while (phba->link_state != LPFC_HBA_READY) {
1539                                 if (i++ > timeout) {
1540                                         rc = -ETIMEDOUT;
1541                                         break;
1542                                 }
1543
1544                                 msleep(10);
1545                         }
1546                 }
1547
1548         } else
1549                 rc = -ENODEV;
1550
1551 loopback_mode_exit:
1552         vports = lpfc_create_vport_work_array(phba);
1553         if (vports) {
1554                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1555                         shost = lpfc_shost_from_vport(vports[i]);
1556                         scsi_unblock_requests(shost);
1557                 }
1558                 lpfc_destroy_vport_work_array(phba, vports);
1559         } else {
1560                 shost = lpfc_shost_from_vport(phba->pport);
1561                 scsi_unblock_requests(shost);
1562         }
1563
1564         /*
1565          * Let SLI layer release mboxq if mbox command completed after timeout.
1566          */
1567         if (mbxstatus != MBX_TIMEOUT)
1568                 mempool_free(pmboxq, phba->mbox_mem_pool);
1569
1570 job_error:
1571         /* make error code available to userspace */
1572         job->reply->result = rc;
1573         /* complete the job back to userspace if no error */
1574         if (rc == 0)
1575                 job->job_done(job);
1576         return rc;
1577 }
1578
1579 /**
1580  * lpfcdiag_loop_self_reg - obtains a remote port login id
1581  * @phba: Pointer to HBA context object
1582  * @rpi: Pointer to a remote port login id
1583  *
1584  * This function obtains a remote port login id so the diag loopback test
1585  * can send and receive its own unsolicited CT command.
1586  **/
1587 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
1588 {
1589         LPFC_MBOXQ_t *mbox;
1590         struct lpfc_dmabuf *dmabuff;
1591         int status;
1592
1593         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1594         if (!mbox)
1595                 return ENOMEM;
1596
1597         status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
1598                                 (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
1599         if (status) {
1600                 mempool_free(mbox, phba->mbox_mem_pool);
1601                 return ENOMEM;
1602         }
1603
1604         dmabuff = (struct lpfc_dmabuf *) mbox->context1;
1605         mbox->context1 = NULL;
1606         status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1607
1608         if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1609                 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1610                 kfree(dmabuff);
1611                 if (status != MBX_TIMEOUT)
1612                         mempool_free(mbox, phba->mbox_mem_pool);
1613                 return ENODEV;
1614         }
1615
1616         *rpi = mbox->u.mb.un.varWords[0];
1617
1618         lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1619         kfree(dmabuff);
1620         mempool_free(mbox, phba->mbox_mem_pool);
1621         return 0;
1622 }
1623
1624 /**
1625  * lpfcdiag_loop_self_unreg - unregs from the rpi
1626  * @phba: Pointer to HBA context object
1627  * @rpi: Remote port login id
1628  *
1629  * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
1630  **/
1631 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1632 {
1633         LPFC_MBOXQ_t *mbox;
1634         int status;
1635
1636         /* Allocate mboxq structure */
1637         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1638         if (mbox == NULL)
1639                 return ENOMEM;
1640
1641         lpfc_unreg_login(phba, 0, rpi, mbox);
1642         status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1643
1644         if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1645                 if (status != MBX_TIMEOUT)
1646                         mempool_free(mbox, phba->mbox_mem_pool);
1647                 return EIO;
1648         }
1649
1650         mempool_free(mbox, phba->mbox_mem_pool);
1651         return 0;
1652 }
1653
1654 /**
1655  * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
1656  * @phba: Pointer to HBA context object
1657  * @rpi: Remote port login id
1658  * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
1660  *
1661  * This function obtains the transmit and receive ids required to send
1662  * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * flags are used so the unsolicited response handler is able to process
1664  * the ct command sent on the same port.
1665  **/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
			 uint16_t *txxri, uint16_t * rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	unsigned long flags;

	*txxri = 0;
	*rxxri = 0;
	/* Register for the ELX loopback CT event: the unsolicited response
	 * to our own request is delivered through this event and carries
	 * the receive exchange id.
	 */
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		INIT_LIST_HEAD(&dmabuf->list);
		/* One DMA buffer holds a single BPL entry immediately
		 * followed by the CT request that the entry points at.
		 */
		bpl = (struct ulp_bde64 *) dmabuf->virt;
		memset(bpl, 0, sizeof(*bpl));
		ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
		bpl->addrHigh =
			le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
		bpl->addrLow =
			le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
		bpl->tus.f.bdeFlags = 0;
		bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* Bail out if any allocation above failed */
	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL) {
		ret_val = ENOMEM;
		goto err_get_xri_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	/* Build the special ELX loopback XRI setup CT request */
	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;


	/* XMIT_SEQUENCE64_CR iocb pointing at the BPL built above */
	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);

	cmd->un.xseq64.w5.hcsw.Fctl = LA;
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = rpi;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	/* Send the sequence to ourselves and wait for the response iocb */
	ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
				rspiocbq,
				(phba->fc_ratov * 2)
				+ LPFC_DRVR_TIMEOUT);
	if (ret_val)
		goto err_get_xri_exit;

	/* The transmit exchange id comes back in the response iocb */
	*txxri =  rsp->ulpContext;

	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	/* Wait for the unsolicited CT event carrying the receive xri */
	ret_val = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
	if (list_empty(&evt->events_to_see))
		ret_val = (ret_val) ? EINTR : ETIMEDOUT;
	else {
		ret_val = IOCB_SUCCESS;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* The receive xri rides in the event's immediate data */
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	/* Drop both event references: the one taken above and the one
	 * lpfc_bsg_event_new created, which destroys the event.
	 */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	/* On iocb timeout the SLI layer still owns cmdiocbq; do not free */
	if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}
1792
1793 /**
1794  * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1795  * @phba: Pointer to HBA context object
1796  * @bpl: Pointer to 64 bit bde structure
1797  * @size: Number of bytes to process
1798  * @nocopydata: Flag to copy user data into the allocated buffer
1799  *
1800  * This function allocates page size buffers and populates an lpfc_dmabufext.
1801  * If allowed the user data pointed to with indataptr is copied into the kernel
1802  * memory. The chained list of page size buffers is returned.
1803  **/
1804 static struct lpfc_dmabufext *
1805 diag_cmd_data_alloc(struct lpfc_hba *phba,
1806                    struct ulp_bde64 *bpl, uint32_t size,
1807                    int nocopydata)
1808 {
1809         struct lpfc_dmabufext *mlist = NULL;
1810         struct lpfc_dmabufext *dmp;
1811         int cnt, offset = 0, i = 0;
1812         struct pci_dev *pcidev;
1813
1814         pcidev = phba->pcidev;
1815
1816         while (size) {
1817                 /* We get chunks of 4K */
1818                 if (size > BUF_SZ_4K)
1819                         cnt = BUF_SZ_4K;
1820                 else
1821                         cnt = size;
1822
1823                 /* allocate struct lpfc_dmabufext buffer header */
1824                 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
1825                 if (!dmp)
1826                         goto out;
1827
1828                 INIT_LIST_HEAD(&dmp->dma.list);
1829
1830                 /* Queue it to a linked list */
1831                 if (mlist)
1832                         list_add_tail(&dmp->dma.list, &mlist->dma.list);
1833                 else
1834                         mlist = dmp;
1835
1836                 /* allocate buffer */
1837                 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
1838                                                    cnt,
1839                                                    &(dmp->dma.phys),
1840                                                    GFP_KERNEL);
1841
1842                 if (!dmp->dma.virt)
1843                         goto out;
1844
1845                 dmp->size = cnt;
1846
1847                 if (nocopydata) {
1848                         bpl->tus.f.bdeFlags = 0;
1849                         pci_dma_sync_single_for_device(phba->pcidev,
1850                                 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1851
1852                 } else {
1853                         memset((uint8_t *)dmp->dma.virt, 0, cnt);
1854                         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1855                 }
1856
1857                 /* build buffer ptr list for IOCB */
1858                 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
1859                 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
1860                 bpl->tus.f.bdeSize = (ushort) cnt;
1861                 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1862                 bpl++;
1863
1864                 i++;
1865                 offset += cnt;
1866                 size -= cnt;
1867         }
1868
1869         mlist->flag = i;
1870         return mlist;
1871 out:
1872         diag_cmd_data_free(phba, mlist);
1873         return NULL;
1874 }
1875
1876 /**
1877  * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
1878  * @phba: Pointer to HBA context object
1879  * @rxxri: Receive exchange id
1880  * @len: Number of data bytes
1881  *
 * This function allocates and posts a data buffer of sufficient size to receive
 * an unsolicited CT command.
1884  **/
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
			     size_t len)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd = NULL;
	struct list_head head, *curr, *next;
	struct lpfc_dmabuf *rxbmp;
	struct lpfc_dmabuf *dmp;
	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
	struct ulp_bde64 *rxbpl = NULL;
	uint32_t num_bde;
	struct lpfc_dmabufext *rxbuffer = NULL;
	int ret_val = 0;
	int i = 0;

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (rxbmp != NULL) {
		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		INIT_LIST_HEAD(&rxbmp->list);
		rxbpl = (struct ulp_bde64 *) rxbmp->virt;
		/* Chain up page-sized DMA buffers covering len bytes */
		rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
	}

	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
		ret_val = ENOMEM;
		goto err_post_rxbufs_exit;
	}

	/* Queue buffers for the receive exchange */
	num_bde = (uint32_t)rxbuffer->flag;
	dmp = &rxbuffer->dma;

	cmd = &cmdiocbq->iocb;
	i = 0;

	/* Walk the buffer chain, posting each buffer to the ELS ring
	 * against the receive exchange id.
	 */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &dmp->list);
	list_for_each_safe(curr, next, &head) {
		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
		list_del(curr);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* HBQ-enabled HBAs post one tagged buffer per iocb */
			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
			cmd->un.quexri64cx.buff.bde.addrHigh =
				putPaddrHigh(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.addrLow =
				putPaddrLow(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
			cmd->ulpCommand = CMD_QUE_XRI64_CX;
			cmd->ulpPU = 0;
			cmd->ulpLe = 1;
			cmd->ulpBdeCount = 1;
			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;

		} else {
			/* Non-HBQ path packs up to two buffers per iocb;
			 * keep collecting until two are staged or the
			 * chain runs out.
			 */
			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
			cmd->un.cont64[i].tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
					cmd->ulpBdeCount = ++i;

			if ((--num_bde > 0) && (i < 2))
				continue;

			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
			cmd->ulpLe = 1;
		}

		cmd->ulpClass = CLASS3;
		cmd->ulpContext = rxxri;

		ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

		if (ret_val == IOCB_ERROR) {
			/* Free the staged buffers that were never posted */
			diag_cmd_data_free(phba,
				(struct lpfc_dmabufext *)mp[0]);
			if (mp[1])
				diag_cmd_data_free(phba,
					  (struct lpfc_dmabufext *)mp[1]);
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = EIO;
			goto err_post_rxbufs_exit;
		}

		/* Track the posted buffers so the unsolicited handler can
		 * look them up when data arrives.
		 */
		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
		if (mp[1]) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
			mp[1] = NULL;
		}

		/* The iocb was freed by lpfc_sli_issue_iocb */
		cmdiocbq = lpfc_sli_get_iocbq(phba);
		if (!cmdiocbq) {
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = EIO;
			goto err_post_rxbufs_exit;
		}

		cmd = &cmdiocbq->iocb;
		i = 0;
	}
	list_del(&head);

err_post_rxbufs_exit:

	if (rxbmp) {
		if (rxbmp->virt)
			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}

	if (cmdiocbq)
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	return ret_val;
}
2005
2006 /**
2007  * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
2008  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2009  *
2010  * This function receives a user data buffer to be transmitted and received on
2011  * the same port, the link must be up and in loopback mode prior
2012  * to being called.
2013  * 1. A kernel buffer is allocated to copy the user data into.
2014  * 2. The port registers with "itself".
2015  * 3. The transmit and receive exchange ids are obtained.
2016  * 4. The receive exchange id is posted.
2017  * 5. A new els loopback event is created.
2018  * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2020  *
2021  * This function is meant to be called n times while the port is in loopback
2022  * so it is the apps responsibility to issue a reset to take the port out
2023  * of loopback mode.
2024  **/
static int
lpfc_bsg_diag_test(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_test *diag_mode;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;
	uint32_t full_size;
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf  *curr;
	uint16_t txxri, rxxri;
	uint32_t num_bde;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	unsigned long flags;
	void *dataout = NULL;
	uint32_t total_mem;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	/* Validate that the request is at least big enough to carry the
	 * vendor-specific diag_mode_test payload.
	 */
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	/* Loopback echoes the data back, so request and reply payloads
	 * must be the same length.
	 */
	if (job->request_payload.payload_len !=
		job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	diag_mode = (struct diag_mode_test *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	/* Reject if the HBA is in error, management I/O is blocked, or
	 * SLI is not active.
	 */
	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	/* The link must already be up and in loopback mode */
	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}

	if (size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
		 * then we allocate 64k and re-use that buffer over and over to
		 * xfer the whole block. This is because Linux kernel has a
		 * problem allocating more than 120k of kernel space memory. Saw
		 * problem with GET_FCPTARGETMAPPING...
		 */
		if (size <= (64 * 1024))
			total_mem = size;
		else
			total_mem = 64 * 1024;
	} else
		/* Allocate memory for ioctl data */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	/* Copy the user payload in after space reserved for the elx
	 * loopback header.
	 */
	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
				job->request_payload.sg_cnt,
				ptr, size);

	/* Log in with ourselves to obtain an rpi for the exchange */
	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	/* Obtain the transmit and receive exchange ids */
	rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
	if (rc) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	/* Post receive buffers large enough for header plus payload */
	rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
	if (rc) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	/* Register for the CT event that will carry the looped-back data */
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		INIT_LIST_HEAD(&txbmp->list);
		txbpl = (struct ulp_bde64 *) txbmp->virt;
		if (txbpl)
			/* Build the transmit BDE list and DMA buffer chain */
			txbuffer = diag_cmd_data_alloc(phba,
							txbpl, full_size, 0);
	}

	if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	/* Copy header + payload into the chained DMA buffers; the first
	 * buffer starts with the elx loopback CT header.
	 */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			/* First segment: build the loopback CT header */
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size   = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
			ptr + current_offset,
			segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);

	/* Build the XMIT_SEQUENCE iocb */

	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = txxri;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	/* Transmit the sequence and wait for its completion */
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
				      (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);

	if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
		rc = -EIO;
		goto err_loopback_test_exit;
	}

	/* Wait for the looped-back data to arrive via the CT event */
	evt->waiting = 1;
	rc = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
	evt->waiting = 0;
	if (list_empty(&evt->events_to_see))
		rc = (rc) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		/* The received frame must be header + payload, exactly */
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			rc = IOCB_SUCCESS;
			/* skip over elx loopback header */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			job->reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			job->reply->reply_payload_rcv_len = size;
		}
	}

err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	/* Drop both event references (ours plus the creation ref) */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (cmdiocbq != NULL)
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}
2303
2304 /**
2305  * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
2306  * @job: GET_DFC_REV fc_bsg_job
2307  **/
2308 static int
2309 lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
2310 {
2311         struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2312         struct lpfc_hba *phba = vport->phba;
2313         struct get_mgmt_rev *event_req;
2314         struct get_mgmt_rev_reply *event_reply;
2315         int rc = 0;
2316
2317         if (job->request_len <
2318             sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
2319                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2320                                 "2740 Received GET_DFC_REV request below "
2321                                 "minimum size\n");
2322                 rc = -EINVAL;
2323                 goto job_error;
2324         }
2325
2326         event_req = (struct get_mgmt_rev *)
2327                 job->request->rqst_data.h_vendor.vendor_cmd;
2328
2329         event_reply = (struct get_mgmt_rev_reply *)
2330                 job->reply->reply_data.vendor_reply.vendor_rsp;
2331
2332         if (job->reply_len <
2333             sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
2334                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2335                                 "2741 Received GET_DFC_REV reply below "
2336                                 "minimum size\n");
2337                 rc = -EINVAL;
2338                 goto job_error;
2339         }
2340
2341         event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
2342         event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
2343 job_error:
2344         job->reply->result = rc;
2345         if (rc == 0)
2346                 job->job_done(job);
2347         return rc;
2348 }
2349
2350 /**
2351  * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
2352  * @phba: Pointer to HBA context object.
2353  * @pmboxq: Pointer to mailbox command.
2354  *
2355  * This is completion handler function for mailbox commands issued from
2356  * lpfc_bsg_issue_mbox function. This function is called by the
2357  * mailbox event handler function with no lock held. This function
2358  * will wake up thread waiting on the wait queue pointed by context1
2359  * of the mailbox.
2360  **/
2361 void
2362 lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2363 {
2364         struct bsg_job_data *dd_data;
2365         MAILBOX_t *pmb;
2366         MAILBOX_t *mb;
2367         struct fc_bsg_job *job;
2368         uint32_t size;
2369         unsigned long flags;
2370
2371         spin_lock_irqsave(&phba->ct_ev_lock, flags);
2372         dd_data = pmboxq->context1;
2373         if (!dd_data) {
2374                 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2375                 return;
2376         }
2377
2378         pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
2379         mb = dd_data->context_un.mbox.mb;
2380         job = dd_data->context_un.mbox.set_job;
2381         memcpy(mb, pmb, sizeof(*pmb));
2382         size = job->request_payload.payload_len;
2383         job->reply->reply_payload_rcv_len =
2384                 sg_copy_from_buffer(job->reply_payload.sg_list,
2385                                 job->reply_payload.sg_cnt,
2386                                 mb, size);
2387         job->reply->result = 0;
2388         dd_data->context_un.mbox.set_job = NULL;
2389         job->dd_data = NULL;
2390         job->job_done(job);
2391         spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2392         mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2393         kfree(mb);
2394         kfree(dd_data);
2395         return;
2396 }
2397
/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
 *
 * Returns 0 when the command may be issued, or -EPERM when the command is
 * rejected: offline-only commands attempted while the port is on-line,
 * commands the application may never issue, and unrecognized opcodes.
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
		/* port is offline - fall through to the always-allowed list */
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
		break;
	/* commands below are explicitly not permitted from the application;
	 * they share the rejection path with unknown opcodes (default)
	 */
	case MBX_SET_VARIABLE:
	case MBX_RUN_BIU_DIAG64:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_SPARM64:
	case MBX_READ_LA:
	case MBX_READ_LA64:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}
2476
2477 /**
2478  * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2479  * @phba: Pointer to HBA context object.
2480  * @mb: Pointer to a mailbox object.
2481  * @vport: Pointer to a vport object.
2482  *
2483  * Allocate a tracking object, mailbox command memory, get a mailbox
2484  * from the mailbox pool, copy the caller mailbox command.
2485  *
2486  * If offline and the sli is active we need to poll for the command (port is
2487  * being reset) and com-plete the job, otherwise issue the mailbox command and
2488  * let our completion handler finish the command.
2489  **/
2490 static uint32_t
2491 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2492         struct lpfc_vport *vport)
2493 {
2494         LPFC_MBOXQ_t *pmboxq;
2495         MAILBOX_t *pmb;
2496         MAILBOX_t *mb;
2497         struct bsg_job_data *dd_data;
2498         uint32_t size;
2499         int rc = 0;
2500
2501         /* allocate our bsg tracking structure */
2502         dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2503         if (!dd_data) {
2504                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2505                                 "2727 Failed allocation of dd_data\n");
2506                 return -ENOMEM;
2507         }
2508
2509         mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
2510         if (!mb) {
2511                 kfree(dd_data);
2512                 return -ENOMEM;
2513         }
2514
2515         pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2516         if (!pmboxq) {
2517                 kfree(dd_data);
2518                 kfree(mb);
2519                 return -ENOMEM;
2520         }
2521
2522         size = job->request_payload.payload_len;
2523         job->reply->reply_payload_rcv_len =
2524                 sg_copy_to_buffer(job->request_payload.sg_list,
2525                                 job->request_payload.sg_cnt,
2526                                 mb, size);
2527
2528         rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2529         if (rc != 0) {
2530                 kfree(dd_data);
2531                 kfree(mb);
2532                 mempool_free(pmboxq, phba->mbox_mem_pool);
2533                 return rc; /* must be negative */
2534         }
2535
2536         memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2537         pmb = &pmboxq->u.mb;
2538         memcpy(pmb, mb, sizeof(*pmb));
2539         pmb->mbxOwner = OWN_HOST;
2540         pmboxq->context1 = NULL;
2541         pmboxq->vport = vport;
2542
2543         if ((vport->fc_flag & FC_OFFLINE_MODE) ||
2544             (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
2545                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2546                 if (rc != MBX_SUCCESS) {
2547                         if (rc != MBX_TIMEOUT) {
2548                                 kfree(dd_data);
2549                                 kfree(mb);
2550                                 mempool_free(pmboxq, phba->mbox_mem_pool);
2551                         }
2552                         return  (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
2553                 }
2554
2555                 memcpy(mb, pmb, sizeof(*pmb));
2556                 job->reply->reply_payload_rcv_len =
2557                         sg_copy_from_buffer(job->reply_payload.sg_list,
2558                                         job->reply_payload.sg_cnt,
2559                                         mb, size);
2560                 kfree(dd_data);
2561                 kfree(mb);
2562                 mempool_free(pmboxq, phba->mbox_mem_pool);
2563                 /* not waiting mbox already done */
2564                 return 0;
2565         }
2566
2567         /* setup wake call as IOCB callback */
2568         pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
2569         /* setup context field to pass wait_queue pointer to wake function */
2570         pmboxq->context1 = dd_data;
2571         dd_data->type = TYPE_MBOX;
2572         dd_data->context_un.mbox.pmboxq = pmboxq;
2573         dd_data->context_un.mbox.mb = mb;
2574         dd_data->context_un.mbox.set_job = job;
2575         job->dd_data = dd_data;
2576         rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
2577         if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
2578                 kfree(dd_data);
2579                 kfree(mb);
2580                 mempool_free(pmboxq, phba->mbox_mem_pool);
2581                 return -EIO;
2582         }
2583
2584         return 1;
2585 }
2586
2587 /**
2588  * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
2589  * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
2590  **/
2591 static int
2592 lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
2593 {
2594         struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2595         struct lpfc_hba *phba = vport->phba;
2596         int rc = 0;
2597
2598         /* in case no data is transferred */
2599         job->reply->reply_payload_rcv_len = 0;
2600         if (job->request_len <
2601             sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
2602                 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2603                                 "2737 Received MBOX_REQ request below "
2604                                 "minimum size\n");
2605                 rc = -EINVAL;
2606                 goto job_error;
2607         }
2608
2609         if (job->request_payload.payload_len != PAGE_SIZE) {
2610                 rc = -EINVAL;
2611                 goto job_error;
2612         }
2613
2614         if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2615                 rc = -EAGAIN;
2616                 goto job_error;
2617         }
2618
2619         rc = lpfc_bsg_issue_mbox(phba, job, vport);
2620
2621 job_error:
2622         if (rc == 0) {
2623                 /* job done */
2624                 job->reply->result = 0;
2625                 job->dd_data = NULL;
2626                 job->job_done(job);
2627         } else if (rc == 1)
2628                 /* job submitted, will complete later*/
2629                 rc = 0; /* return zero, no error */
2630         else {
2631                 /* some error occurred */
2632                 job->reply->result = rc;
2633                 job->dd_data = NULL;
2634         }
2635
2636         return rc;
2637 }
2638
2639 /**
2640  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
2641  * @job: fc_bsg_job to handle
2642  **/
2643 static int
2644 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
2645 {
2646         int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
2647         int rc;
2648
2649         switch (command) {
2650         case LPFC_BSG_VENDOR_SET_CT_EVENT:
2651                 rc = lpfc_bsg_hba_set_event(job);
2652                 break;
2653         case LPFC_BSG_VENDOR_GET_CT_EVENT:
2654                 rc = lpfc_bsg_hba_get_event(job);
2655                 break;
2656         case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
2657                 rc = lpfc_bsg_send_mgmt_rsp(job);
2658                 break;
2659         case LPFC_BSG_VENDOR_DIAG_MODE:
2660                 rc = lpfc_bsg_diag_mode(job);
2661                 break;
2662         case LPFC_BSG_VENDOR_DIAG_TEST:
2663                 rc = lpfc_bsg_diag_test(job);
2664                 break;
2665         case LPFC_BSG_VENDOR_GET_MGMT_REV:
2666                 rc = lpfc_bsg_get_dfc_rev(job);
2667                 break;
2668         case LPFC_BSG_VENDOR_MBOX:
2669                 rc = lpfc_bsg_mbox_cmd(job);
2670                 break;
2671         default:
2672                 rc = -EINVAL;
2673                 job->reply->reply_payload_rcv_len = 0;
2674                 /* make error code available to userspace */
2675                 job->reply->result = rc;
2676                 break;
2677         }
2678
2679         return rc;
2680 }
2681
2682 /**
2683  * lpfc_bsg_request - handle a bsg request from the FC transport
2684  * @job: fc_bsg_job to handle
2685  **/
2686 int
2687 lpfc_bsg_request(struct fc_bsg_job *job)
2688 {
2689         uint32_t msgcode;
2690         int rc;
2691
2692         msgcode = job->request->msgcode;
2693         switch (msgcode) {
2694         case FC_BSG_HST_VENDOR:
2695                 rc = lpfc_bsg_hst_vendor(job);
2696                 break;
2697         case FC_BSG_RPT_ELS:
2698                 rc = lpfc_bsg_rport_els(job);
2699                 break;
2700         case FC_BSG_RPT_CT:
2701                 rc = lpfc_bsg_send_mgmt_cmd(job);
2702                 break;
2703         default:
2704                 rc = -EINVAL;
2705                 job->reply->reply_payload_rcv_len = 0;
2706                 /* make error code available to userspace */
2707                 job->reply->result = rc;
2708                 break;
2709         }
2710
2711         return rc;
2712 }
2713
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB.  The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace
 *
 * For EVT and MBOX requests there is no IOCB to abort; those paths detach
 * the job from its tracking object and complete it with -EAGAIN so the
 * application can retry.  Always returns 0 (see comment at the end).
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	/* ct_ev_lock serializes against the completion handlers, which
	 * also read/clear dd_data under this lock
	 */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		/* release ct_ev_lock before taking hbalock for the abort */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		/* job is finished by the completion handler, not here */
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signallying the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* same retry hint as the event case */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	default:
		/* unrecognized tracking type - nothing we can abort */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return 0;
}