[SCSI] lpfc 8.2.3 : Miscellaneous Small Fixes - part 1
drivers/scsi/lpfc/lpfc_sli.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
32
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_compat.h"
41 #include "lpfc_debugfs.h"
42
43 /*
44  * Define macro to log: Mailbox command x%x cannot issue Data.
45  * This allows message 0311 (lpfc_msgBlk0311) to be logged from multiple
46  * places w/o perturbing the log msg utility.
47  */
48 #define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
49                         lpfc_printf_log(phba, \
50                                 KERN_INFO, \
51                                 LOG_MBOX | LOG_SLI, \
52                                 "(%d):0311 Mailbox command x%x cannot " \
53                                 "issue Data: x%x x%x x%x\n", \
54                                 pmbox->vport ? pmbox->vport->vpi : 0, \
55                                 pmbox->mb.mbxCommand,           \
56                                 phba->pport->port_state,        \
57                                 psli->sli_flag, \
58                                 flag)
59
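/*
 * Intended use (illustrative), e.g. from the mailbox submission path
 * (lpfc_sli_issue_mbox) when a command must be rejected:
 *
 *	LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
 */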
60
61 /* There are only four IOCB completion types. */
62 typedef enum _lpfc_iocb_type {
63         LPFC_UNKNOWN_IOCB,
64         LPFC_UNSOL_IOCB,
65         LPFC_SOL_IOCB,
66         LPFC_ABORT_IOCB
67 } lpfc_iocb_type;
68
69                 /* SLI-2/SLI-3 provide different sized iocbs.  Given a pointer
70                  * to the start of the ring, and the slot number of the
71                  * desired iocb entry, calc a pointer to that entry.
72                  */
73 static inline IOCB_t *
74 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
75 {
76         return (IOCB_t *) (((char *) pring->cmdringaddr) +
77                            pring->cmdidx * phba->iocb_cmd_size);
78 }
79
80 static inline IOCB_t *
81 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
82 {
83         return (IOCB_t *) (((char *) pring->rspringaddr) +
84                            pring->rspidx * phba->iocb_rsp_size);
85 }
86
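/*
 * Remove and return the next free iocbq from phba->lpfc_iocb_list, or
 * NULL if the pool is empty.  Lock-free variant: callers are expected to
 * hold phba->hbalock (lpfc_sli_get_iocbq() below is the locked wrapper).
 */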
87 static struct lpfc_iocbq *
88 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
89 {
90         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
91         struct lpfc_iocbq * iocbq = NULL;
92
93         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
94         return iocbq;
95 }
96
97 struct lpfc_iocbq *
98 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
99 {
100         struct lpfc_iocbq * iocbq = NULL;
101         unsigned long iflags;
102
103         spin_lock_irqsave(&phba->hbalock, iflags);
104         iocbq = __lpfc_sli_get_iocbq(phba);
105         spin_unlock_irqrestore(&phba->hbalock, iflags);
106         return iocbq;
107 }
108
109 void
110 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
111 {
112         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
113
114         /*
115          * Clean all volatile data fields, preserve iotag and node struct.
116          */
117         memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
118         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
119 }
120
121 void
122 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
123 {
124         unsigned long iflags;
125
126         /*
127          * Clean all volatile data fields, preserve iotag and node struct.
128          */
129         spin_lock_irqsave(&phba->hbalock, iflags);
130         __lpfc_sli_release_iocbq(phba, iocbq);
131         spin_unlock_irqrestore(&phba->hbalock, iflags);
132 }
133
134 /*
135  * Translate the iocb command to an iocb command type used to decide the final
136  * disposition of each completed IOCB.
137  */
138 static lpfc_iocb_type
139 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
140 {
141         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
142
143         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
144                 return LPFC_UNKNOWN_IOCB;
145
146         switch (iocb_cmnd) {
147         case CMD_XMIT_SEQUENCE_CR:
148         case CMD_XMIT_SEQUENCE_CX:
149         case CMD_XMIT_BCAST_CN:
150         case CMD_XMIT_BCAST_CX:
151         case CMD_ELS_REQUEST_CR:
152         case CMD_ELS_REQUEST_CX:
153         case CMD_CREATE_XRI_CR:
154         case CMD_CREATE_XRI_CX:
155         case CMD_GET_RPI_CN:
156         case CMD_XMIT_ELS_RSP_CX:
157         case CMD_GET_RPI_CR:
158         case CMD_FCP_IWRITE_CR:
159         case CMD_FCP_IWRITE_CX:
160         case CMD_FCP_IREAD_CR:
161         case CMD_FCP_IREAD_CX:
162         case CMD_FCP_ICMND_CR:
163         case CMD_FCP_ICMND_CX:
164         case CMD_FCP_TSEND_CX:
165         case CMD_FCP_TRSP_CX:
166         case CMD_FCP_TRECEIVE_CX:
167         case CMD_FCP_AUTO_TRSP_CX:
168         case CMD_ADAPTER_MSG:
169         case CMD_ADAPTER_DUMP:
170         case CMD_XMIT_SEQUENCE64_CR:
171         case CMD_XMIT_SEQUENCE64_CX:
172         case CMD_XMIT_BCAST64_CN:
173         case CMD_XMIT_BCAST64_CX:
174         case CMD_ELS_REQUEST64_CR:
175         case CMD_ELS_REQUEST64_CX:
176         case CMD_FCP_IWRITE64_CR:
177         case CMD_FCP_IWRITE64_CX:
178         case CMD_FCP_IREAD64_CR:
179         case CMD_FCP_IREAD64_CX:
180         case CMD_FCP_ICMND64_CR:
181         case CMD_FCP_ICMND64_CX:
182         case CMD_FCP_TSEND64_CX:
183         case CMD_FCP_TRSP64_CX:
184         case CMD_FCP_TRECEIVE64_CX:
185         case CMD_GEN_REQUEST64_CR:
186         case CMD_GEN_REQUEST64_CX:
187         case CMD_XMIT_ELS_RSP64_CX:
188                 type = LPFC_SOL_IOCB;
189                 break;
190         case CMD_ABORT_XRI_CN:
191         case CMD_ABORT_XRI_CX:
192         case CMD_CLOSE_XRI_CN:
193         case CMD_CLOSE_XRI_CX:
194         case CMD_XRI_ABORTED_CX:
195         case CMD_ABORT_MXRI64_CN:
196                 type = LPFC_ABORT_IOCB;
197                 break;
198         case CMD_RCV_SEQUENCE_CX:
199         case CMD_RCV_ELS_REQ_CX:
200         case CMD_RCV_SEQUENCE64_CX:
201         case CMD_RCV_ELS_REQ64_CX:
202         case CMD_ASYNC_STATUS:
203         case CMD_IOCB_RCV_SEQ64_CX:
204         case CMD_IOCB_RCV_ELS64_CX:
205         case CMD_IOCB_RCV_CONT64_CX:
206                 type = LPFC_UNSOL_IOCB;
207                 break;
208         default:
209                 type = LPFC_UNKNOWN_IOCB;
210                 break;
211         }
212
213         return type;
214 }
215
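/*
 * Issue a CONFIG_RING mailbox command (polled) for every SLI ring so the
 * HBA learns the ring layout.  Returns 0 on success, -ENOMEM if no
 * mailbox buffer is available, or -ENXIO (and marks the HBA in error)
 * if any CONFIG_RING command fails.
 */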
216 static int
217 lpfc_sli_ring_map(struct lpfc_hba *phba)
218 {
219         struct lpfc_sli *psli = &phba->sli;
220         LPFC_MBOXQ_t *pmb;
221         MAILBOX_t *pmbox;
222         int i, rc, ret = 0;
223
224         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
225         if (!pmb)
226                 return -ENOMEM;
227         pmbox = &pmb->mb;
228         phba->link_state = LPFC_INIT_MBX_CMDS;
229         for (i = 0; i < psli->num_rings; i++) {
230                 lpfc_config_ring(phba, i, pmb);
231                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
232                 if (rc != MBX_SUCCESS) {
233                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
234                                         "0446 Adapter failed to init (%d), "
235                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
236                                         "ring %d\n",
237                                         rc, pmbox->mbxCommand,
238                                         pmbox->mbxStatus, i);
239                         phba->link_state = LPFC_HBA_ERROR;
240                         ret = -ENXIO;
241                         break;
242                 }
243         }
244         mempool_free(pmb, phba->mbox_mem_pool);
245         return ret;
246 }
247
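/*
 * Queue a command iocb on the ring's txcmplq to await its completion.
 * For ELS ring commands (other than ABORT/CLOSE_XRI_CN) the vport's ELS
 * timeout timer is (re)armed to twice phba->fc_ratov seconds.
 */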
248 static int
249 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
250                         struct lpfc_iocbq *piocb)
251 {
252         list_add_tail(&piocb->list, &pring->txcmplq);
253         pring->txcmplq_cnt++;
254         if (unlikely(pring->ringno == LPFC_ELS_RING) &&
255            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
256            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
257                 /* ELS commands on this ring must carry a vport */
258                 BUG_ON(!piocb->vport);
259                 mod_timer(&piocb->vport->els_tmofunc,
260                           jiffies + HZ * (phba->fc_ratov << 1));
262         }
263
264
265         return 0;
266 }
267
268 static struct lpfc_iocbq *
269 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
270 {
271         struct lpfc_iocbq *cmd_iocb;
272
273         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
274         if (cmd_iocb != NULL)
275                 pring->txq_cnt--;
276         return cmd_iocb;
277 }
278
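/*
 * Return a pointer to the next free command IOCB slot on the ring, or
 * NULL if the ring is full.  The cached cmdGetInx is refreshed from the
 * port get/put area; an out-of-range value from the port is treated as
 * an adapter error and handed to the worker thread (HA_ERATT/HS_FFER3).
 * Expected to be called with hbalock held.
 */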
279 static IOCB_t *
280 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
281 {
282         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
283                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
284                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
285         uint32_t  max_cmd_idx = pring->numCiocb;
286
287         if ((pring->next_cmdidx == pring->cmdidx) &&
288            (++pring->next_cmdidx >= max_cmd_idx))
289                 pring->next_cmdidx = 0;
290
291         if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
292
293                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
294
295                 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
296                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
297                                         "0315 Ring %d issue: portCmdGet %d "
298                                         "is bigger than cmd ring %d\n",
299                                         pring->ringno,
300                                         pring->local_getidx, max_cmd_idx);
301
302                         phba->link_state = LPFC_HBA_ERROR;
303                         /*
304                          * All error attention handlers are posted to
305                          * worker thread
306                          */
307                         phba->work_ha |= HA_ERATT;
308                         phba->work_hs = HS_FFER3;
309
310                         /* hbalock should already be held */
311                         if (phba->work_wait)
312                                 lpfc_worker_wake_up(phba);
313
314                         return NULL;
315                 }
316
317                 if (pring->local_getidx == pring->next_cmdidx)
318                         return NULL;
319         }
320
321         return lpfc_cmd_iocb(phba, pring);
322 }
323
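/*
 * Allocate a driver iotag for the given iocbq and record it in the
 * psli->iocbq_lookup table used to match response IOCBs back to their
 * commands.  The table is grown by LPFC_IOCBQ_LOOKUP_INCREMENT entries
 * when exhausted.  Returns the new iotag, or 0 if none could be
 * allocated.
 */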
324 uint16_t
325 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
326 {
327         struct lpfc_iocbq **new_arr;
328         struct lpfc_iocbq **old_arr;
329         size_t new_len;
330         struct lpfc_sli *psli = &phba->sli;
331         uint16_t iotag;
332
333         spin_lock_irq(&phba->hbalock);
334         iotag = psli->last_iotag;
335         if (++iotag < psli->iocbq_lookup_len) {
336                 psli->last_iotag = iotag;
337                 psli->iocbq_lookup[iotag] = iocbq;
338                 spin_unlock_irq(&phba->hbalock);
339                 iocbq->iotag = iotag;
340                 return iotag;
341         } else if (psli->iocbq_lookup_len < (0xffff
342                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
343                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
344                 spin_unlock_irq(&phba->hbalock);
345                 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
346                                   GFP_KERNEL);
347                 if (new_arr) {
348                         spin_lock_irq(&phba->hbalock);
349                         old_arr = psli->iocbq_lookup;
350                         if (new_len <= psli->iocbq_lookup_len) {
351                                 /* highly improbable case */
352                                 kfree(new_arr);
353                                 iotag = psli->last_iotag;
354                                 if (++iotag < psli->iocbq_lookup_len) {
355                                         psli->last_iotag = iotag;
356                                         psli->iocbq_lookup[iotag] = iocbq;
357                                         spin_unlock_irq(&phba->hbalock);
358                                         iocbq->iotag = iotag;
359                                         return iotag;
360                                 }
361                                 spin_unlock_irq(&phba->hbalock);
362                                 return 0;
363                         }
364                         if (psli->iocbq_lookup)
365                                 memcpy(new_arr, old_arr,
366                                        ((psli->last_iotag  + 1) *
367                                         sizeof (struct lpfc_iocbq *)));
368                         psli->iocbq_lookup = new_arr;
369                         psli->iocbq_lookup_len = new_len;
370                         psli->last_iotag = iotag;
371                         psli->iocbq_lookup[iotag] = iocbq;
372                         spin_unlock_irq(&phba->hbalock);
373                         iocbq->iotag = iotag;
374                         kfree(old_arr);
375                         return iotag;
376                 }
377         } else
378                 spin_unlock_irq(&phba->hbalock);
379
380         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
381                         "0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
382                         psli->last_iotag);
383
384         return 0;
385 }
386
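/*
 * Copy nextiocb into the ring slot at iocb, queue it on the txcmplq if
 * it has a completion handler (otherwise release it right away), and
 * advance cmdPutInx so the HBA picks up the new entry.  Expected to be
 * called with hbalock held (it uses __lpfc_sli_release_iocbq()).
 */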
387 static void
388 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
389                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
390 {
391         /*
392          * Set up an iotag
393          */
394         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
395
396         if (pring->ringno == LPFC_ELS_RING) {
397                 lpfc_debugfs_slow_ring_trc(phba,
398                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
399                         *(((uint32_t *) &nextiocb->iocb) + 4),
400                         *(((uint32_t *) &nextiocb->iocb) + 6),
401                         *(((uint32_t *) &nextiocb->iocb) + 7));
402         }
403
404         /*
405          * Issue iocb command to adapter
406          */
407         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
408         wmb();
409         pring->stats.iocb_cmd++;
410
411         /*
412          * If there is no completion routine to call, we can release the
413          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
414          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
415          */
416         if (nextiocb->iocb_cmpl)
417                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
418         else
419                 __lpfc_sli_release_iocbq(phba, nextiocb);
420
421         /*
422          * Let the HBA know what IOCB slot will be the next one the
423          * driver will put a command into.
424          */
425         pring->cmdidx = pring->next_cmdidx;
426         writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
427 }
428
429 static void
430 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
431 {
432         int ringno = pring->ringno;
433
434         pring->flag |= LPFC_CALL_RING_AVAILABLE;
435
436         wmb();
437
438         /*
439          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
440          * The HBA will tell us when an IOCB entry is available.
441          */
442         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
443         readl(phba->CAregaddr); /* flush */
444
445         pring->stats.iocb_cmd_full++;
446 }
447
448 static void
449 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
450 {
451         int ringno = pring->ringno;
452
453         /*
454          * Tell the HBA that there is work to do in this ring.
455          */
456         wmb();
457         writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
458         readl(phba->CAregaddr); /* flush */
459 }
460
461 static void
462 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
463 {
464         IOCB_t *iocb;
465         struct lpfc_iocbq *nextiocb;
466
467         /*
468          * Check to see if:
469          *  (a) there is anything on the txq to send
470          *  (b) link is up
471          *  (c) link attention events can be processed (fcp ring only)
472          *  (d) IOCB processing is not blocked by the outstanding mbox command.
473          */
474         if (pring->txq_cnt &&
475             lpfc_is_link_up(phba) &&
476             (pring->ringno != phba->sli.fcp_ring ||
477              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
478
479                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
480                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
481                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
482
483                 if (iocb)
484                         lpfc_sli_update_ring(phba, pring);
485                 else
486                         lpfc_sli_update_full_ring(phba, pring);
487         }
488
489         return;
490 }
491
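/*
 * Return the next free entry of host buffer queue 'hbqno', or NULL if
 * the queue is full.  The cached hbqGetIdx is refreshed from the port;
 * an out-of-range index is logged (1802) and puts the HBA in error
 * state.
 */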
492 struct lpfc_hbq_entry *
493 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
494 {
495         struct hbq_s *hbqp = &phba->hbqs[hbqno];
496
497         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
498             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
499                 hbqp->next_hbqPutIdx = 0;
500
501         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
502                 uint32_t raw_index = phba->hbq_get[hbqno];
503                 uint32_t getidx = le32_to_cpu(raw_index);
504
505                 hbqp->local_hbqGetIdx = getidx;
506
507                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
508                         lpfc_printf_log(phba, KERN_ERR,
509                                         LOG_SLI | LOG_VPORT,
510                                         "1802 HBQ %d: local_hbqGetIdx "
511                                         "%u is greater than hbqp->entry_count %u\n",
512                                         hbqno, hbqp->local_hbqGetIdx,
513                                         hbqp->entry_count);
514
515                         phba->link_state = LPFC_HBA_ERROR;
516                         return NULL;
517                 }
518
519                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
520                         return NULL;
521         }
522
523         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
524                         hbqp->hbqPutIdx;
525 }
526
527 void
528 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
529 {
530         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
531         struct hbq_dmabuf *hbq_buf;
532         int i, hbq_count;
533
534         hbq_count = lpfc_sli_hbq_count();
535         /* Return all memory used by all HBQs */
536         for (i = 0; i < hbq_count; ++i) {
537                 list_for_each_entry_safe(dmabuf, next_dmabuf,
538                                 &phba->hbqs[i].hbq_buffer_list, list) {
539                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
540                         list_del(&hbq_buf->dbuf.list);
541                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
542                 }
543         }
544 }
545
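/*
 * Post one hbq_dmabuf to the firmware: fill the next free HBQ entry
 * with the buffer's BDE and tag, bump hbqPutIdx in SLIM, and track the
 * buffer on hbq_buffer_list.  Returns the HBQ entry used, or NULL if no
 * slot was available.
 */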
546 static struct lpfc_hbq_entry *
547 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
548                          struct hbq_dmabuf *hbq_buf)
549 {
550         struct lpfc_hbq_entry *hbqe;
551         dma_addr_t physaddr = hbq_buf->dbuf.phys;
552
553         /* Get next HBQ entry slot to use */
554         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
555         if (hbqe) {
556                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
557
558                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
559                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
560                 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
561                 hbqe->bde.tus.f.bdeFlags = 0;
562                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
563                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
564                                 /* Sync SLIM */
565                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
566                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
567                                 /* flush */
568                 readl(phba->hbq_put + hbqno);
569                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
570         }
571         return hbqe;
572 }
573
574 static struct lpfc_hbq_init lpfc_els_hbq = {
575         .rn = 1,
576         .entry_count = 200,
577         .mask_count = 0,
578         .profile = 0,
579         .ring_mask = (1 << LPFC_ELS_RING),
580         .buffer_count = 0,
581         .init_count = 20,
582         .add_count = 5,
583 };
584
585 static struct lpfc_hbq_init lpfc_extra_hbq = {
586         .rn = 1,
587         .entry_count = 200,
588         .mask_count = 0,
589         .profile = 0,
590         .ring_mask = (1 << LPFC_EXTRA_RING),
591         .buffer_count = 0,
592         .init_count = 0,
593         .add_count = 5,
594 };
595
596 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
597         &lpfc_els_hbq,
598         &lpfc_extra_hbq,
599 };
600
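/*
 * Allocate up to 'count' receive buffers for HBQ 'hbqno' (never more
 * than the HBQ's entry_count) and post them to the firmware, updating
 * buffer_count as buffers are accepted.  Returns 0 on success, 1 if a
 * buffer allocation fails.
 */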
601 static int
602 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
603 {
604         uint32_t i, start, end;
605         struct hbq_dmabuf *hbq_buffer;
606
607         if (!phba->hbqs[hbqno].hbq_alloc_buffer) {
608                 return 0;
609         }
610
611         start = lpfc_hbq_defs[hbqno]->buffer_count;
612         end = count + lpfc_hbq_defs[hbqno]->buffer_count;
613         if (end > lpfc_hbq_defs[hbqno]->entry_count) {
614                 end = lpfc_hbq_defs[hbqno]->entry_count;
615         }
616
617         /* Populate HBQ entries */
618         for (i = start; i < end; i++) {
619                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
620                 if (!hbq_buffer)
621                         return 1;
622                 hbq_buffer->tag = (i | (hbqno << 16));
623                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
624                         lpfc_hbq_defs[hbqno]->buffer_count++;
625                 else
626                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
627         }
628         return 0;
629 }
630
631 int
632 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
633 {
634         return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
635                                          lpfc_hbq_defs[qno]->add_count);
636 }
637
638 int
639 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
640 {
641         return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
642                                          lpfc_hbq_defs[qno]->init_count);
643 }
644
645 struct hbq_dmabuf *
646 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
647 {
648         struct lpfc_dmabuf *d_buf;
649         struct hbq_dmabuf *hbq_buf;
650         uint32_t hbqno;
651
652         hbqno = tag >> 16;
653         if (hbqno >= LPFC_MAX_HBQS)
654                 return NULL;
655
656         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
657                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
658                 if (hbq_buf->tag == tag) {
659                         return hbq_buf;
660                 }
661         }
662         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
663                         "1803 Bad hbq tag. Data: x%x x%x\n",
664                         tag, lpfc_hbq_defs[tag >> 16]->buffer_count);
665         return NULL;
666 }
667
668 void
669 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
670 {
671         uint32_t hbqno;
672
673         if (hbq_buffer) {
674                 hbqno = hbq_buffer->tag >> 16;
675                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
676                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
677                 }
678         }
679 }
680
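/*
 * Validate a mailbox command code: return the command itself if it is
 * one the driver knows how to complete, or MBX_SHUTDOWN for anything
 * unrecognized (treated as a fatal error by the caller).
 */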
681 static int
682 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
683 {
684         uint8_t ret;
685
686         switch (mbxCommand) {
687         case MBX_LOAD_SM:
688         case MBX_READ_NV:
689         case MBX_WRITE_NV:
690         case MBX_RUN_BIU_DIAG:
691         case MBX_INIT_LINK:
692         case MBX_DOWN_LINK:
693         case MBX_CONFIG_LINK:
694         case MBX_CONFIG_RING:
695         case MBX_RESET_RING:
696         case MBX_READ_CONFIG:
697         case MBX_READ_RCONFIG:
698         case MBX_READ_SPARM:
699         case MBX_READ_STATUS:
700         case MBX_READ_RPI:
701         case MBX_READ_XRI:
702         case MBX_READ_REV:
703         case MBX_READ_LNK_STAT:
704         case MBX_REG_LOGIN:
705         case MBX_UNREG_LOGIN:
706         case MBX_READ_LA:
707         case MBX_CLEAR_LA:
708         case MBX_DUMP_MEMORY:
709         case MBX_DUMP_CONTEXT:
710         case MBX_RUN_DIAGS:
711         case MBX_RESTART:
712         case MBX_UPDATE_CFG:
713         case MBX_DOWN_LOAD:
714         case MBX_DEL_LD_ENTRY:
715         case MBX_RUN_PROGRAM:
716         case MBX_SET_MASK:
717         case MBX_SET_SLIM:
718         case MBX_UNREG_D_ID:
719         case MBX_KILL_BOARD:
720         case MBX_CONFIG_FARP:
721         case MBX_BEACON:
722         case MBX_LOAD_AREA:
723         case MBX_RUN_BIU_DIAG64:
724         case MBX_CONFIG_PORT:
725         case MBX_READ_SPARM64:
726         case MBX_READ_RPI64:
727         case MBX_REG_LOGIN64:
728         case MBX_READ_LA64:
729         case MBX_FLASH_WR_ULA:
730         case MBX_SET_DEBUG:
731         case MBX_LOAD_EXP_ROM:
732         case MBX_ASYNCEVT_ENABLE:
733         case MBX_REG_VPI:
734         case MBX_UNREG_VPI:
735         case MBX_HEARTBEAT:
736                 ret = mbxCommand;
737                 break;
738         default:
739                 ret = MBX_SHUTDOWN;
740                 break;
741         }
742         return ret;
743 }
744 static void
745 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
746 {
747         wait_queue_head_t *pdone_q;
748         unsigned long drvr_flag;
749
750         /*
751          * If pdone_q is empty, the driver thread gave up waiting and
752          * continued running.
753          */
754         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
755         spin_lock_irqsave(&phba->hbalock, drvr_flag);
756         pdone_q = (wait_queue_head_t *) pmboxq->context1;
757         if (pdone_q)
758                 wake_up_interruptible(pdone_q);
759         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
760         return;
761 }
762
763 void
764 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
765 {
766         struct lpfc_dmabuf *mp;
767         uint16_t rpi;
768         int rc;
769
770         mp = (struct lpfc_dmabuf *) (pmb->context1);
771
772         if (mp) {
773                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
774                 kfree(mp);
775         }
776
777         /*
778          * If a REG_LOGIN succeeded after the node was destroyed or the
779          * node is in re-discovery, the driver needs to clean up the RPI.
780          */
781         if (!(phba->pport->load_flag & FC_UNLOADING) &&
782             pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
783             !pmb->mb.mbxStatus) {
784
785                 rpi = pmb->mb.un.varWords[0];
786                 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
787                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
788                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
789                 if (rc != MBX_NOT_FINISHED)
790                         return;
791         }
792
793         mempool_free(pmb, phba->mbox_mem_pool);
794         return;
795 }
796
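/*
 * Process mailbox completions queued on phba->sli.mboxq_cmpl: trace each
 * completion, treat unknown commands as a fatal adapter error, retry
 * commands that failed with MBXERR_NO_RESOURCES, and finally invoke the
 * command's mbox_cmpl callback (if any).
 */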
797 int
798 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
799 {
800         MAILBOX_t *pmbox;
801         LPFC_MBOXQ_t *pmb;
802         int rc;
803         LIST_HEAD(cmplq);
804
805         phba->sli.slistat.mbox_event++;
806
807         /* Get all completed mailbox buffers into the cmplq */
808         spin_lock_irq(&phba->hbalock);
809         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
810         spin_unlock_irq(&phba->hbalock);
811
812         /* Get a Mailbox buffer to setup mailbox commands for callback */
813         do {
814                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
815                 if (pmb == NULL)
816                         break;
817
818                 pmbox = &pmb->mb;
819
820                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
821                         if (pmb->vport) {
822                                 lpfc_debugfs_disc_trc(pmb->vport,
823                                         LPFC_DISC_TRC_MBOX_VPORT,
824                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
825                                         (uint32_t)pmbox->mbxCommand,
826                                         pmbox->un.varWords[0],
827                                         pmbox->un.varWords[1]);
828                         }
829                         else {
830                                 lpfc_debugfs_disc_trc(phba->pport,
831                                         LPFC_DISC_TRC_MBOX,
832                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
833                                         (uint32_t)pmbox->mbxCommand,
834                                         pmbox->un.varWords[0],
835                                         pmbox->un.varWords[1]);
836                         }
837                 }
838
839                 /*
840                  * It is a fatal error if an unknown mbox command completes.
841                  */
842                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
843                     MBX_SHUTDOWN) {
844                         /* Unknown mailbox command completion */
845                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
846                                         "(%d):0323 Unknown Mailbox command "
847                                         "%x Cmpl\n",
848                                         pmb->vport ? pmb->vport->vpi : 0,
849                                         pmbox->mbxCommand);
850                         phba->link_state = LPFC_HBA_ERROR;
851                         phba->work_hs = HS_FFER3;
852                         lpfc_handle_eratt(phba);
853                         continue;
854                 }
855
856                 if (pmbox->mbxStatus) {
857                         phba->sli.slistat.mbox_stat_err++;
858                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
859                                 /* Mbox cmd cmpl error - RETRYing */
860                                 lpfc_printf_log(phba, KERN_INFO,
861                                                 LOG_MBOX | LOG_SLI,
862                                                 "(%d):0305 Mbox cmd cmpl "
863                                                 "error - RETRYing Data: x%x "
864                                                 "x%x x%x x%x\n",
865                                                 pmb->vport ? pmb->vport->vpi : 0,
866                                                 pmbox->mbxCommand,
867                                                 pmbox->mbxStatus,
868                                                 pmbox->un.varWords[0],
869                                                 pmb->vport ? pmb->vport->port_state : 0);
870                                 pmbox->mbxStatus = 0;
871                                 pmbox->mbxOwner = OWN_HOST;
872                                 spin_lock_irq(&phba->hbalock);
873                                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
874                                 spin_unlock_irq(&phba->hbalock);
875                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
876                                 if (rc == MBX_SUCCESS)
877                                         continue;
878                         }
879                 }
880
881                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
882                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
883                                 "(%d):0307 Mailbox cmd x%x Cmpl x%p "
884                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
885                                 pmb->vport ? pmb->vport->vpi : 0,
886                                 pmbox->mbxCommand,
887                                 pmb->mbox_cmpl,
888                                 *((uint32_t *) pmbox),
889                                 pmbox->un.varWords[0],
890                                 pmbox->un.varWords[1],
891                                 pmbox->un.varWords[2],
892                                 pmbox->un.varWords[3],
893                                 pmbox->un.varWords[4],
894                                 pmbox->un.varWords[5],
895                                 pmbox->un.varWords[6],
896                                 pmbox->un.varWords[7]);
897
898                 if (pmb->mbox_cmpl)
899                         pmb->mbox_cmpl(phba, pmb);
900         } while (1);
901         return 0;
902 }
903
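/*
 * For an unsolicited receive, detach the HBQ buffer identified by 'tag'
 * and swap its DMA memory with a freshly allocated hbq_dmabuf so the
 * received data can be handed up while the HBQ is immediately
 * replenished.  If no replacement can be allocated, the original buffer
 * is returned (and is not reposted to the HBQ).
 */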
904 static struct lpfc_dmabuf *
905 lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
906 {
907         struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
908         uint32_t hbqno;
909         void *virt;             /* virtual address ptr */
910         dma_addr_t phys;        /* mapped address */
911
912         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
913         if (hbq_entry == NULL)
914                 return NULL;
915         list_del(&hbq_entry->dbuf.list);
916
917         hbqno = tag >> 16;
918         new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
919         if (new_hbq_entry == NULL)
920                 return &hbq_entry->dbuf;
921         new_hbq_entry->tag = -1;
922         phys = new_hbq_entry->dbuf.phys;
923         virt = new_hbq_entry->dbuf.virt;
924         new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
925         new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
926         hbq_entry->dbuf.phys = phys;
927         hbq_entry->dbuf.virt = virt;
928         lpfc_sli_free_hbq(phba, hbq_entry);
929         return &new_hbq_entry->dbuf;
930 }
931
932
933 static int
934 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
935                             struct lpfc_iocbq *saveq)
936 {
937         IOCB_t           * irsp;
938         WORD5            * w5p;
939         uint32_t           Rctl, Type;
940         uint32_t           match, i;
941
942         match = 0;
943         irsp = &(saveq->iocb);
944
945         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
946                 if (pring->lpfc_sli_rcv_async_status)
947                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
948                 else
949                         lpfc_printf_log(phba,
950                                         KERN_WARNING,
951                                         LOG_SLI,
952                                         "0316 Ring %d handler: unexpected "
953                                         "ASYNC_STATUS iocb received evt_code "
954                                         "0x%x\n",
955                                         pring->ringno,
956                                         irsp->un.asyncstat.evt_code);
957                 return 1;
958         }
959
960         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
961             || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
962             || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
963             || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
964                 Rctl = FC_ELS_REQ;
965                 Type = FC_ELS_DATA;
966         } else {
967                 w5p = (WORD5 *) &(saveq->iocb.un.ulpWord[5]);
970                 Rctl = w5p->hcsw.Rctl;
971                 Type = w5p->hcsw.Type;
972
973                 /* Firmware Workaround */
974                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
975                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
976                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
977                         Rctl = FC_ELS_REQ;
978                         Type = FC_ELS_DATA;
979                         w5p->hcsw.Rctl = Rctl;
980                         w5p->hcsw.Type = Type;
981                 }
982         }
983
984         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
985                 if (irsp->ulpBdeCount != 0)
986                         saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
987                                                 irsp->un.ulpWord[3]);
988                 if (irsp->ulpBdeCount == 2)
989                         saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
990                                                 irsp->unsli3.sli3Words[7]);
991         }
992
993         /* unSolicited Responses */
994         if (pring->prt[0].profile) {
995                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
996                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
997                                                                         saveq);
998                 match = 1;
999         } else {
1000                 /* We must search, based on rctl / type
1001                    for the right routine */
1002                 for (i = 0; i < pring->num_mask; i++) {
1003                         if ((pring->prt[i].rctl == Rctl) &&
1004                             (pring->prt[i].type == Type)) {
1005                                 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1006                                         (pring->prt[i].lpfc_sli_rcv_unsol_event)
1007                                                         (phba, pring, saveq);
1008                                 match = 1;
1009                                 break;
1010                         }
1011                 }
1015         }
1016         if (match == 0) {
1017                 /* Unexpected Rctl / Type received */
1018                 /* Ring <ringno> handler: unexpected
1019                    Rctl <Rctl> Type <Type> received */
1020                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1021                                 "0313 Ring %d handler: unexpected Rctl x%x "
1022                                 "Type x%x received\n",
1023                                 pring->ringno, Rctl, Type);
1024         }
1025         return 1;
1026 }
1027
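/*
 * Map a response IOCB back to the originating command via its ulpIoTag
 * and the iocbq_lookup table, removing the command from the txcmplq.
 * Returns NULL (and logs 0317) if the iotag is out of range.
 */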
1028 static struct lpfc_iocbq *
1029 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1030                       struct lpfc_sli_ring *pring,
1031                       struct lpfc_iocbq *prspiocb)
1032 {
1033         struct lpfc_iocbq *cmd_iocb = NULL;
1034         uint16_t iotag;
1035
1036         iotag = prspiocb->iocb.ulpIoTag;
1037
1038         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
1039                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
1040                 list_del_init(&cmd_iocb->list);
1041                 pring->txcmplq_cnt--;
1042                 return cmd_iocb;
1043         }
1044
1045         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1046                         "0317 iotag x%x is out of "
1047                         "range: max iotag x%x wd0 x%x\n",
1048                         iotag, phba->sli.last_iotag,
1049                         *(((uint32_t *) &prspiocb->iocb) + 7));
1050         return NULL;
1051 }
1052
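/*
 * Handle the completion of a solicited IOCB: look up the originating
 * command and either invoke its iocb_cmpl handler (rewriting the status
 * of driver-aborted ELS commands to LOCAL_REJECT/SLI_ABORTED) or simply
 * release it.  A missing command is only unexpected, and logged, on
 * non-ELS rings.
 */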
1053 static int
1054 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1055                           struct lpfc_iocbq *saveq)
1056 {
1057         struct lpfc_iocbq *cmdiocbp;
1058         int rc = 1;
1059         unsigned long iflag;
1060
1061         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
1062         spin_lock_irqsave(&phba->hbalock, iflag);
1063         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
1064         spin_unlock_irqrestore(&phba->hbalock, iflag);
1065
1066         if (cmdiocbp) {
1067                 if (cmdiocbp->iocb_cmpl) {
1068                         /*
1069                          * Post all ELS completions to the worker thread.
1070                          * All other are passed to the completion callback.
1071                          */
1072                         if (pring->ringno == LPFC_ELS_RING) {
1073                                 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
1074                                         cmdiocbp->iocb_flag &=
1075                                                 ~LPFC_DRIVER_ABORTED;
1076                                         saveq->iocb.ulpStatus =
1077                                                 IOSTAT_LOCAL_REJECT;
1078                                         saveq->iocb.un.ulpWord[4] =
1079                                                 IOERR_SLI_ABORTED;
1080                                 }
1081                         }
1082                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
1083                 } else
1084                         lpfc_sli_release_iocbq(phba, cmdiocbp);
1085         } else {
1086                 /*
1087                  * Unknown initiating command based on the response iotag.
1088                  * This could be the case on the ELS ring because of
1089                  * lpfc_els_abort().
1090                  */
1091                 if (pring->ringno != LPFC_ELS_RING) {
1092                         /*
1093                          * Ring <ringno> handler: unexpected completion IoTag
1094                          * <IoTag>
1095                          */
1096                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1097                                          "0322 Ring %d handler: "
1098                                          "unexpected completion IoTag x%x "
1099                                          "Data: x%x x%x x%x x%x\n",
1100                                          pring->ringno,
1101                                          saveq->iocb.ulpIoTag,
1102                                          saveq->iocb.ulpStatus,
1103                                          saveq->iocb.un.ulpWord[4],
1104                                          saveq->iocb.ulpCommand,
1105                                          saveq->iocb.ulpContext);
1106                 }
1107         }
1108
1109         return rc;
1110 }
1111
1112 static void
1113 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1114 {
1115         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1116                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1117                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1118         /*
1119          * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
1120          * rsp ring <portRspMax>
1121          */
1122         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1123                         "0312 Ring %d handler: portRspPut %d "
1124                         "is bigger than rsp ring %d\n",
1125                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
1126                         pring->numRiocb);
1127
1128         phba->link_state = LPFC_HBA_ERROR;
1129
1130         /*
1131          * All error attention handlers are posted to
1132          * worker thread
1133          */
1134         phba->work_ha |= HA_ERATT;
1135         phba->work_hs = HS_FFER3;
1136
1137         /* hbalock should already be held */
1138         if (phba->work_wait)
1139                 lpfc_worker_wake_up(phba);
1140
1141         return;
1142 }
1143
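/*
 * Poll the FCP ring for response IOCBs and complete them inline,
 * mirroring the interrupt-driven fast-ring handler below.  Intended for
 * FCP ring polling mode (see the cfg_poll handling in the fast-ring
 * path).
 */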
1144 void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1145 {
1146         struct lpfc_sli      *psli  = &phba->sli;
1147         struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1148         IOCB_t *irsp = NULL;
1149         IOCB_t *entry = NULL;
1150         struct lpfc_iocbq *cmdiocbq = NULL;
1151         struct lpfc_iocbq rspiocbq;
1152         struct lpfc_pgp *pgp;
1153         uint32_t status;
1154         uint32_t portRspPut, portRspMax;
1155         int type;
1156         uint32_t rsp_cmpl = 0;
1157         uint32_t ha_copy;
1158         unsigned long iflags;
1159
1160         pring->stats.iocb_event++;
1161
1162         pgp = (phba->sli_rev == 3) ?
1163                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1164                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1165
1166
1167         /*
1168          * The next available response entry should never exceed the maximum
1169          * entries.  If it does, treat it as an adapter hardware error.
1170          */
1171         portRspMax = pring->numRiocb;
1172         portRspPut = le32_to_cpu(pgp->rspPutInx);
1173         if (unlikely(portRspPut >= portRspMax)) {
1174                 lpfc_sli_rsp_pointers_error(phba, pring);
1175                 return;
1176         }
1177
1178         rmb();
1179         while (pring->rspidx != portRspPut) {
1180                 entry = lpfc_resp_iocb(phba, pring);
1181                 if (++pring->rspidx >= portRspMax)
1182                         pring->rspidx = 0;
1183
1184                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1185                                       (uint32_t *) &rspiocbq.iocb,
1186                                       phba->iocb_rsp_size);
1187                 irsp = &rspiocbq.iocb;
1188                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1189                 pring->stats.iocb_rsp++;
1190                 rsp_cmpl++;
1191
1192                 if (unlikely(irsp->ulpStatus)) {
1193                         /* Rsp ring <ringno> error: IOCB */
1194                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1195                                         "0326 Rsp Ring %d error: IOCB Data: "
1196                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1197                                         pring->ringno,
1198                                         irsp->un.ulpWord[0],
1199                                         irsp->un.ulpWord[1],
1200                                         irsp->un.ulpWord[2],
1201                                         irsp->un.ulpWord[3],
1202                                         irsp->un.ulpWord[4],
1203                                         irsp->un.ulpWord[5],
1204                                         *(((uint32_t *) irsp) + 6),
1205                                         *(((uint32_t *) irsp) + 7));
1206                 }
1207
1208                 switch (type) {
1209                 case LPFC_ABORT_IOCB:
1210                 case LPFC_SOL_IOCB:
1211                         /*
1212                          * Idle exchange closed via ABTS from port.  No iocb
1213                          * resources need to be recovered.
1214                          */
1215                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1216                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1217                                                 "0314 IOCB cmd 0x%x "
1218                                                 "processed. Skipping "
1219                                                 "completion\n",
1220                                                 irsp->ulpCommand);
1221                                 break;
1222                         }
1223
1224                         spin_lock_irqsave(&phba->hbalock, iflags);
1225                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1226                                                          &rspiocbq);
1227                         spin_unlock_irqrestore(&phba->hbalock, iflags);
1228                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1229                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1230                                                       &rspiocbq);
1231                         }
1232                         break;
1233                 default:
1234                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1235                                 char adaptermsg[LPFC_MAX_ADPTMSG];
1236                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1237                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1238                                        MAX_MSG_DATA);
1239                                 dev_warn(&((phba->pcidev)->dev),
1240                                          "lpfc%d: %s\n",
1241                                          phba->brd_no, adaptermsg);
1242                         } else {
1243                                 /* Unknown IOCB command */
1244                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1245                                                 "0321 Unknown IOCB command "
1246                                                 "Data: x%x, x%x x%x x%x x%x\n",
1247                                                 type, irsp->ulpCommand,
1248                                                 irsp->ulpStatus,
1249                                                 irsp->ulpIoTag,
1250                                                 irsp->ulpContext);
1251                         }
1252                         break;
1253                 }
1254
1255                 /*
1256                  * The response IOCB has been processed.  Update the ring
1257                  * pointer in SLIM.  If the port response put pointer has not
1258                  * been updated, sync the pgp->rspPutInx and fetch the new port
1259                  * response put pointer.
1260                  */
1261                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1262
1263                 if (pring->rspidx == portRspPut)
1264                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1265         }
1266
1267         ha_copy = readl(phba->HAregaddr);
1268         ha_copy >>= (LPFC_FCP_RING * 4);
1269
1270         if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
1271                 spin_lock_irqsave(&phba->hbalock, iflags);
1272                 pring->stats.iocb_rsp_full++;
1273                 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1274                 writel(status, phba->CAregaddr);
1275                 readl(phba->CAregaddr);
1276                 spin_unlock_irqrestore(&phba->hbalock, iflags);
1277         }
1278         if ((ha_copy & HA_R0CE_RSP) &&
1279             (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1280                 spin_lock_irqsave(&phba->hbalock, iflags);
1281                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1282                 pring->stats.iocb_cmd_empty++;
1283
1284                 /* Force update of the local copy of cmdGetInx */
1285                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1286                 lpfc_sli_resume_iocb(phba, pring);
1287
1288                 if ((pring->lpfc_sli_cmd_available))
1289                         (pring->lpfc_sli_cmd_available) (phba, pring);
1290
1291                 spin_unlock_irqrestore(&phba->hbalock, iflags);
1292         }
1293
1294         return;
1295 }
1296
1297 /*
1298  * This routine presumes LPFC_FCP_RING handling and doesn't bother
1299  * to check it explicitly.
1300  */
1301 static int
1302 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1303                                 struct lpfc_sli_ring *pring, uint32_t mask)
1304 {
1305         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1306                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1307                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1308         IOCB_t *irsp = NULL;
1309         IOCB_t *entry = NULL;
1310         struct lpfc_iocbq *cmdiocbq = NULL;
1311         struct lpfc_iocbq rspiocbq;
1312         uint32_t status;
1313         uint32_t portRspPut, portRspMax;
1314         int rc = 1;
1315         lpfc_iocb_type type;
1316         unsigned long iflag;
1317         uint32_t rsp_cmpl = 0;
1318
1319         spin_lock_irqsave(&phba->hbalock, iflag);
1320         pring->stats.iocb_event++;
1321
1322         /*
1323          * The next available response entry should never exceed the maximum
1324          * entries.  If it does, treat it as an adapter hardware error.
1325          */
1326         portRspMax = pring->numRiocb;
1327         portRspPut = le32_to_cpu(pgp->rspPutInx);
1328         if (unlikely(portRspPut >= portRspMax)) {
1329                 lpfc_sli_rsp_pointers_error(phba, pring);
1330                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1331                 return 1;
1332         }
1333
1334         rmb();
1335         while (pring->rspidx != portRspPut) {
1336                 /*
1337                  * Fetch an entry off the ring and copy it into a local data
1338                  * structure.  The copy involves a byte-swap since the
1339                  * network byte order and pci byte orders are different.
1340                  */
1341                 entry = lpfc_resp_iocb(phba, pring);
1342                 phba->last_completion_time = jiffies;
1343
1344                 if (++pring->rspidx >= portRspMax)
1345                         pring->rspidx = 0;
1346
1347                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1348                                       (uint32_t *) &rspiocbq.iocb,
1349                                       phba->iocb_rsp_size);
1350                 INIT_LIST_HEAD(&(rspiocbq.list));
1351                 irsp = &rspiocbq.iocb;
1352
1353                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1354                 pring->stats.iocb_rsp++;
1355                 rsp_cmpl++;
1356
1357                 if (unlikely(irsp->ulpStatus)) {
1358                         /*
1359                          * If resource errors reported from HBA, reduce
1360                          * queuedepths of the SCSI device.
1361                          */
1362                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1363                                 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1364                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1365                                 lpfc_adjust_queue_depth(phba);
1366                                 spin_lock_irqsave(&phba->hbalock, iflag);
1367                         }
1368
1369                         /* Rsp ring <ringno> error: IOCB */
1370                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1371                                         "0336 Rsp Ring %d error: IOCB Data: "
1372                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1373                                         pring->ringno,
1374                                         irsp->un.ulpWord[0],
1375                                         irsp->un.ulpWord[1],
1376                                         irsp->un.ulpWord[2],
1377                                         irsp->un.ulpWord[3],
1378                                         irsp->un.ulpWord[4],
1379                                         irsp->un.ulpWord[5],
1380                                         *(((uint32_t *) irsp) + 6),
1381                                         *(((uint32_t *) irsp) + 7));
1382                 }
1383
1384                 switch (type) {
1385                 case LPFC_ABORT_IOCB:
1386                 case LPFC_SOL_IOCB:
1387                         /*
1388                          * Idle exchange closed via ABTS from port.  No iocb
1389                          * resources need to be recovered.
1390                          */
1391                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1392                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1393                                                 "0333 IOCB cmd 0x%x"
1394                                                 " processed. Skipping"
1395                                                 " completion\n",
1396                                                 irsp->ulpCommand);
1397                                 break;
1398                         }
1399
1400                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1401                                                          &rspiocbq);
1402                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1403                                 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1404                                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1405                                                               &rspiocbq);
1406                                 } else {
1407                                         spin_unlock_irqrestore(&phba->hbalock,
1408                                                                iflag);
1409                                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1410                                                               &rspiocbq);
1411                                         spin_lock_irqsave(&phba->hbalock,
1412                                                           iflag);
1413                                 }
1414                         }
1415                         break;
1416                 case LPFC_UNSOL_IOCB:
1417                         spin_unlock_irqrestore(&phba->hbalock, iflag);
1418                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1419                         spin_lock_irqsave(&phba->hbalock, iflag);
1420                         break;
1421                 default:
1422                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1423                                 char adaptermsg[LPFC_MAX_ADPTMSG];
1424                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1425                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1426                                        MAX_MSG_DATA);
1427                                 dev_warn(&((phba->pcidev)->dev),
1428                                          "lpfc%d: %s\n",
1429                                          phba->brd_no, adaptermsg);
1430                         } else {
1431                                 /* Unknown IOCB command */
1432                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1433                                                 "0334 Unknown IOCB command "
1434                                                 "Data: x%x, x%x x%x x%x x%x\n",
1435                                                 type, irsp->ulpCommand,
1436                                                 irsp->ulpStatus,
1437                                                 irsp->ulpIoTag,
1438                                                 irsp->ulpContext);
1439                         }
1440                         break;
1441                 }
1442
1443                 /*
1444                  * The response IOCB has been processed.  Update the ring
1445                  * pointer in SLIM.  If the port response put pointer has not
1446                  * been updated, sync the pgp->rspPutInx and fetch the new port
1447                  * response put pointer.
1448                  */
1449                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1450
1451                 if (pring->rspidx == portRspPut)
1452                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1453         }
1454
1455         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
1456                 pring->stats.iocb_rsp_full++;
1457                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1458                 writel(status, phba->CAregaddr);
1459                 readl(phba->CAregaddr);
1460         }
1461         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1462                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1463                 pring->stats.iocb_cmd_empty++;
1464
1465                 /* Force update of the local copy of cmdGetInx */
1466                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1467                 lpfc_sli_resume_iocb(phba, pring);
1468
1469                 if ((pring->lpfc_sli_cmd_available))
1470                         (pring->lpfc_sli_cmd_available) (phba, pring);
1471
1472         }
1473
1474         spin_unlock_irqrestore(&phba->hbalock, iflag);
1475         return rc;
1476 }
1477
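/*
 * Handle response completions on a slow-path ring (e.g. the ELS ring).
 * Each response entry is copied into a driver iocbq and chained onto the
 * ring's iocb_continueq until ulpLe marks the last entry of a command,
 * at which point the accumulated list is handed to the solicited,
 * unsolicited or abort completion handling.  The hbalock is taken
 * internally and dropped around the completion callbacks.
 */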
1478 int
1479 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1480                                 struct lpfc_sli_ring *pring, uint32_t mask)
1481 {
1482         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1483                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1484                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1485         IOCB_t *entry;
1486         IOCB_t *irsp = NULL;
1487         struct lpfc_iocbq *rspiocbp = NULL;
1488         struct lpfc_iocbq *next_iocb;
1489         struct lpfc_iocbq *cmdiocbp;
1490         struct lpfc_iocbq *saveq;
1491         uint8_t iocb_cmd_type;
1492         lpfc_iocb_type type;
1493         uint32_t status, free_saveq;
1494         uint32_t portRspPut, portRspMax;
1495         int rc = 1;
1496         unsigned long iflag;
1497
1498         spin_lock_irqsave(&phba->hbalock, iflag);
1499         pring->stats.iocb_event++;
1500
1501         /*
1502          * The next available response entry should never exceed the maximum
1503          * entries.  If it does, treat it as an adapter hardware error.
1504          */
1505         portRspMax = pring->numRiocb;
1506         portRspPut = le32_to_cpu(pgp->rspPutInx);
1507         if (portRspPut >= portRspMax) {
1508                 /*
1509                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1510                  * rsp ring <portRspMax>
1511                  */
1512                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1513                                 "0303 Ring %d handler: portRspPut %d "
1514                                 "is bigger than rsp ring %d\n",
1515                                 pring->ringno, portRspPut, portRspMax);
1516
1517                 phba->link_state = LPFC_HBA_ERROR;
1518                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1519
1520                 phba->work_hs = HS_FFER3;
1521                 lpfc_handle_eratt(phba);
1522
1523                 return 1;
1524         }
1525
1526         rmb();
1527         while (pring->rspidx != portRspPut) {
1528                 /*
1529                  * Build a completion list and call the appropriate handler.
1530                  * The process is to get the next available response iocb, get
1531                  * a free iocb from the list, copy the response data into the
1532                  * free iocb, insert it into the continuation list, and update the
1533                  * next response index to slim.  This process makes response
1534                  * iocbs in the ring available to DMA as fast as possible but
1535                  * pays a penalty for a copy operation.  Since the iocb is
1536                  * only 32 bytes, this penalty is considered small relative to
1537                  * the PCI reads for register values and a slim write.  When
1538                  * the ulpLe field is set, the entire command has been
1539                  * received.
1540                  */
1541                 entry = lpfc_resp_iocb(phba, pring);
1542
1543                 phba->last_completion_time = jiffies;
1544                 rspiocbp = __lpfc_sli_get_iocbq(phba);
1545                 if (rspiocbp == NULL) {
1546                         printk(KERN_ERR "%s: out of buffers! Failing "
1547                                "completion.\n", __FUNCTION__);
1548                         break;
1549                 }
1550
1551                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
1552                                       phba->iocb_rsp_size);
1553                 irsp = &rspiocbp->iocb;
1554
1555                 if (++pring->rspidx >= portRspMax)
1556                         pring->rspidx = 0;
1557
1558                 if (pring->ringno == LPFC_ELS_RING) {
1559                         lpfc_debugfs_slow_ring_trc(phba,
1560                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1561                                 *(((uint32_t *) irsp) + 4),
1562                                 *(((uint32_t *) irsp) + 6),
1563                                 *(((uint32_t *) irsp) + 7));
1564                 }
1565
1566                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1567
1568                 if (list_empty(&(pring->iocb_continueq))) {
1569                         list_add(&rspiocbp->list, &(pring->iocb_continueq));
1570                 } else {
1571                         list_add_tail(&rspiocbp->list,
1572                                       &(pring->iocb_continueq));
1573                 }
1574
1575                 pring->iocb_continueq_cnt++;
1576                 if (irsp->ulpLe) {
1577                         /*
1578                          * By default, the driver expects to free all resources
1579                          * associated with this iocb completion.
1580                          */
1581                         free_saveq = 1;
1582                         saveq = list_get_first(&pring->iocb_continueq,
1583                                                struct lpfc_iocbq, list);
1584                         irsp = &(saveq->iocb);
1585                         list_del_init(&pring->iocb_continueq);
1586                         pring->iocb_continueq_cnt = 0;
1587
1588                         pring->stats.iocb_rsp++;
1589
1590                         /*
1591                          * If resource errors reported from HBA, reduce
1592                          * queuedepths of the SCSI device.
1593                          */
1594                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1595                              (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1596                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1597                                 lpfc_adjust_queue_depth(phba);
1598                                 spin_lock_irqsave(&phba->hbalock, iflag);
1599                         }
1600
1601                         if (irsp->ulpStatus) {
1602                                 /* Rsp ring <ringno> error: IOCB */
1603                                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1604                                                 "0328 Rsp Ring %d error: "
1605                                                 "IOCB Data: "
1606                                                 "x%x x%x x%x x%x "
1607                                                 "x%x x%x x%x x%x "
1608                                                 "x%x x%x x%x x%x "
1609                                                 "x%x x%x x%x x%x\n",
1610                                                 pring->ringno,
1611                                                 irsp->un.ulpWord[0],
1612                                                 irsp->un.ulpWord[1],
1613                                                 irsp->un.ulpWord[2],
1614                                                 irsp->un.ulpWord[3],
1615                                                 irsp->un.ulpWord[4],
1616                                                 irsp->un.ulpWord[5],
1617                                                 *(((uint32_t *) irsp) + 6),
1618                                                 *(((uint32_t *) irsp) + 7),
1619                                                 *(((uint32_t *) irsp) + 8),
1620                                                 *(((uint32_t *) irsp) + 9),
1621                                                 *(((uint32_t *) irsp) + 10),
1622                                                 *(((uint32_t *) irsp) + 11),
1623                                                 *(((uint32_t *) irsp) + 12),
1624                                                 *(((uint32_t *) irsp) + 13),
1625                                                 *(((uint32_t *) irsp) + 14),
1626                                                 *(((uint32_t *) irsp) + 15));
1627                         }
1628
1629                         /*
1630                          * Fetch the IOCB command type and call the correct
1631                          * completion routine.  Solicited and Unsolicited
1632                          * IOCBs on the ELS ring get freed back to the
1633                          * lpfc_iocb_list by the discovery kernel thread.
1634                          */
1635                         iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1636                         type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1637                         if (type == LPFC_SOL_IOCB) {
1638                                 spin_unlock_irqrestore(&phba->hbalock,
1639                                                        iflag);
1640                                 rc = lpfc_sli_process_sol_iocb(phba, pring,
1641                                                                saveq);
1642                                 spin_lock_irqsave(&phba->hbalock, iflag);
1643                         } else if (type == LPFC_UNSOL_IOCB) {
1644                                 spin_unlock_irqrestore(&phba->hbalock,
1645                                                        iflag);
1646                                 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1647                                                                  saveq);
1648                                 spin_lock_irqsave(&phba->hbalock, iflag);
1649                         } else if (type == LPFC_ABORT_IOCB) {
1650                                 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1651                                     ((cmdiocbp =
1652                                       lpfc_sli_iocbq_lookup(phba, pring,
1653                                                             saveq)))) {
1654                                         /* Call the specified completion
1655                                            routine */
1656                                         if (cmdiocbp->iocb_cmpl) {
1657                                                 spin_unlock_irqrestore(
1658                                                        &phba->hbalock,
1659                                                        iflag);
1660                                                 (cmdiocbp->iocb_cmpl) (phba,
1661                                                              cmdiocbp, saveq);
1662                                                 spin_lock_irqsave(
1663                                                           &phba->hbalock,
1664                                                           iflag);
1665                                         } else
1666                                                 __lpfc_sli_release_iocbq(phba,
1667                                                                       cmdiocbp);
1668                                 }
1669                         } else if (type == LPFC_UNKNOWN_IOCB) {
1670                                 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1671
1672                                         char adaptermsg[LPFC_MAX_ADPTMSG];
1673
1674                                         memset(adaptermsg, 0,
1675                                                LPFC_MAX_ADPTMSG);
1676                                         memcpy(&adaptermsg[0], (uint8_t *) irsp,
1677                                                MAX_MSG_DATA);
1678                                         dev_warn(&((phba->pcidev)->dev),
1679                                                  "lpfc%d: %s\n",
1680                                                  phba->brd_no, adaptermsg);
1681                                 } else {
1682                                         /* Unknown IOCB command */
1683                                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1684                                                         "0335 Unknown IOCB "
1685                                                         "command Data: x%x "
1686                                                         "x%x x%x x%x\n",
1687                                                         irsp->ulpCommand,
1688                                                         irsp->ulpStatus,
1689                                                         irsp->ulpIoTag,
1690                                                         irsp->ulpContext);
1691                                 }
1692                         }
1693
1694                         if (free_saveq) {
1695                                 list_for_each_entry_safe(rspiocbp, next_iocb,
1696                                                          &saveq->list, list) {
1697                                         list_del(&rspiocbp->list);
1698                                         __lpfc_sli_release_iocbq(phba,
1699                                                                  rspiocbp);
1700                                 }
1701                                 __lpfc_sli_release_iocbq(phba, saveq);
1702                         }
1703                         rspiocbp = NULL;
1704                 }
1705
1706                 /*
1707                  * If the port response put pointer has not been updated, sync
1708          * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
1709                  * response put pointer.
1710                  */
1711                 if (pring->rspidx == portRspPut) {
1712                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1713                 }
1714         } /* while (pring->rspidx != portRspPut) */
1715
1716         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1717                 /* At least one response entry has been freed */
1718                 pring->stats.iocb_rsp_full++;
1719                 /* SET RxRE_RSP in Chip Att register */
1720                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1721                 writel(status, phba->CAregaddr);
1722                 readl(phba->CAregaddr); /* flush */
1723         }
1724         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1725                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1726                 pring->stats.iocb_cmd_empty++;
1727
1728                 /* Force update of the local copy of cmdGetInx */
1729                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1730                 lpfc_sli_resume_iocb(phba, pring);
1731
1732                 if ((pring->lpfc_sli_cmd_available))
1733                         (pring->lpfc_sli_cmd_available) (phba, pring);
1734
1735         }
1736
1737         spin_unlock_irqrestore(&phba->hbalock, iflag);
1738         return rc;
1739 }
1740
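/*
 * Abort all outstanding I/O on a ring.  Everything on the txq is moved to
 * a local list and completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED,
 * while an abort iotag (ABTS) is issued for each iocb still on the
 * txcmplq.  For the ELS ring the fabric iocb list is aborted first.
 */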
1741 void
1742 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1743 {
1744         LIST_HEAD(completions);
1745         struct lpfc_iocbq *iocb, *next_iocb;
1746         IOCB_t *cmd = NULL;
1747
1748         if (pring->ringno == LPFC_ELS_RING) {
1749                 lpfc_fabric_abort_hba(phba);
1750         }
1751
1752         /* Error everything on txq and txcmplq
1753          * First do the txq.
1754          */
1755         spin_lock_irq(&phba->hbalock);
1756         list_splice_init(&pring->txq, &completions);
1757         pring->txq_cnt = 0;
1758
1759         /* Next issue ABTS for everything on the txcmplq */
1760         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1761                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1762
1763         spin_unlock_irq(&phba->hbalock);
1764
1765         while (!list_empty(&completions)) {
1766                 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1767                 cmd = &iocb->iocb;
1768                 list_del_init(&iocb->list);
1769
1770                 if (!iocb->iocb_cmpl)
1771                         lpfc_sli_release_iocbq(phba, iocb);
1772                 else {
1773                         cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1774                         cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1775                         (iocb->iocb_cmpl) (phba, iocb, iocb);
1776                 }
1777         }
1778 }
1779
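/*
 * Wait for the HBA to become ready by polling the Host Status register
 * until all bits in the given mask are set.  The poll interval backs off
 * as the retry count grows and the board is restarted once (on the 15th
 * try) before giving up.  Returns 0 when ready, 1 on error or timeout.
 */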
1780 int
1781 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1782 {
1783         uint32_t status;
1784         int i = 0;
1785         int retval = 0;
1786
1787         /* Read the HBA Host Status Register */
1788         status = readl(phba->HSregaddr);
1789
1790         /*
1791          * Check status register every 100ms for 5 retries, then every
1792          * 500ms for 5, then every 2.5 sec for 5, then reset board and
1793          * every 2.5 sec for 4.
1794          * Break out of the loop if errors occurred during init.
1795          */
1796         while (((status & mask) != mask) &&
1797                !(status & HS_FFERM) &&
1798                i++ < 20) {
1799
1800                 if (i <= 5)
1801                         msleep(10);
1802                 else if (i <= 10)
1803                         msleep(500);
1804                 else
1805                         msleep(2500);
1806
1807                 if (i == 15) {
1808                                 /* Do post */
1809                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1810                         lpfc_sli_brdrestart(phba);
1811                 }
1812                 /* Read the HBA Host Status Register */
1813                 status = readl(phba->HSregaddr);
1814         }
1815
1816         /* Check to see if any errors occurred during init */
1817         if ((status & HS_FFERM) || (i >= 20)) {
1818                 phba->link_state = LPFC_HBA_ERROR;
1819                 retval = 1;
1820         }
1821
1822         return retval;
1823 }
1824
1825 #define BARRIER_TEST_PATTERN (0xdeadbeef)
1826
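/*
 * Barrier the chip before a reset.  On multi-function (header type 0x80)
 * Helios/Thor adapters the other function is asked to suspend its DMA by
 * writing a KILL_BOARD mailbox word straight into SLIM; error attention
 * is masked while waiting for the test pattern in SLIM to be inverted,
 * after which any pending ERATT is cleared and the HC register restored.
 */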
1827 void lpfc_reset_barrier(struct lpfc_hba *phba)
1828 {
1829         uint32_t __iomem *resp_buf;
1830         uint32_t __iomem *mbox_buf;
1831         volatile uint32_t mbox;
1832         uint32_t hc_copy;
1833         int  i;
1834         uint8_t hdrtype;
1835
1836         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
1837         if (hdrtype != 0x80 ||
1838             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
1839              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
1840                 return;
1841
1842         /*
1843          * Tell the other part of the chip to suspend temporarily all
1844          * its DMA activity.
1845          */
1846         resp_buf = phba->MBslimaddr;
1847
1848         /* Disable the error attention */
1849         hc_copy = readl(phba->HCregaddr);
1850         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1851         readl(phba->HCregaddr); /* flush */
1852         phba->link_flag |= LS_IGNORE_ERATT;
1853
1854         if (readl(phba->HAregaddr) & HA_ERATT) {
1855                 /* Clear Chip error bit */
1856                 writel(HA_ERATT, phba->HAregaddr);
1857                 phba->pport->stopped = 1;
1858         }
1859
1860         mbox = 0;
1861         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
1862         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1863
1864         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1865         mbox_buf = phba->MBslimaddr;
1866         writel(mbox, mbox_buf);
1867
1868         for (i = 0;
1869              readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
1870                 mdelay(1);
1871
1872         if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1873                 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
1874                     phba->pport->stopped)
1875                         goto restore_hc;
1876                 else
1877                         goto clear_errat;
1878         }
1879
1880         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
1881         for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
1882                 mdelay(1);
1883
1884 clear_errat:
1885
1886         while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
1887                 mdelay(1);
1888
1889         if (readl(phba->HAregaddr) & HA_ERATT) {
1890                 writel(HA_ERATT, phba->HAregaddr);
1891                 phba->pport->stopped = 1;
1892         }
1893
1894 restore_hc:
1895         phba->link_flag &= ~LS_IGNORE_ERATT;
1896         writel(hc_copy, phba->HCregaddr);
1897         readl(phba->HCregaddr); /* flush */
1898 }
1899
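/*
 * Kill the running firmware.  A KILL_BOARD mailbox command is issued with
 * error attentions masked; since the command never completes, the routine
 * polls the Host Attention register for ERATT for up to 3 seconds before
 * flushing outstanding I/O and marking the HBA in error state.
 * Returns 0 if ERATT was seen, 1 otherwise.
 */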
1900 int
1901 lpfc_sli_brdkill(struct lpfc_hba *phba)
1902 {
1903         struct lpfc_sli *psli;
1904         LPFC_MBOXQ_t *pmb;
1905         uint32_t status;
1906         uint32_t ha_copy;
1907         int retval;
1908         int i = 0;
1909
1910         psli = &phba->sli;
1911
1912         /* Kill HBA */
1913         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1914                         "0329 Kill HBA Data: x%x x%x\n",
1915                         phba->pport->port_state, psli->sli_flag);
1916
1917         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1918         if (!pmb)
1919                 return 1;
1920
1921         /* Disable the error attention */
1922         spin_lock_irq(&phba->hbalock);
1923         status = readl(phba->HCregaddr);
1924         status &= ~HC_ERINT_ENA;
1925         writel(status, phba->HCregaddr);
1926         readl(phba->HCregaddr); /* flush */
1927         phba->link_flag |= LS_IGNORE_ERATT;
1928         spin_unlock_irq(&phba->hbalock);
1929
1930         lpfc_kill_board(phba, pmb);
1931         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1932         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1933
1934         if (retval != MBX_SUCCESS) {
1935                 if (retval != MBX_BUSY)
1936                         mempool_free(pmb, phba->mbox_mem_pool);
1937                 spin_lock_irq(&phba->hbalock);
1938                 phba->link_flag &= ~LS_IGNORE_ERATT;
1939                 spin_unlock_irq(&phba->hbalock);
1940                 return 1;
1941         }
1942
1943         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1944
1945         mempool_free(pmb, phba->mbox_mem_pool);
1946
1947         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
1948          * attention every 100ms for 3 seconds. If we don't get ERATT after
1949          * 3 seconds we still set HBA_ERROR state because the status of the
1950          * board is now undefined.
1951          */
1952         ha_copy = readl(phba->HAregaddr);
1953
1954         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
1955                 mdelay(100);
1956                 ha_copy = readl(phba->HAregaddr);
1957         }
1958
1959         del_timer_sync(&psli->mbox_tmo);
1960         if (ha_copy & HA_ERATT) {
1961                 writel(HA_ERATT, phba->HAregaddr);
1962                 phba->pport->stopped = 1;
1963         }
1964         spin_lock_irq(&phba->hbalock);
1965         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1966         phba->link_flag &= ~LS_IGNORE_ERATT;
1967         spin_unlock_irq(&phba->hbalock);
1968
1969         psli->mbox_active = NULL;
1970         lpfc_hba_down_post(phba);
1971         phba->link_state = LPFC_HBA_ERROR;
1972
1973         return ha_copy & HA_ERATT ? 0 : 1;
1974 }
1975
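/*
 * Reset the HBA by toggling INITFF in the Host Control register.  PCI
 * parity/SERR reporting is disabled around the reset, the driver's DID
 * and event tag state is cleared and the per-ring indices are zeroed.
 * Leaves the HBA in the LPFC_WARM_START state.
 */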
1976 int
1977 lpfc_sli_brdreset(struct lpfc_hba *phba)
1978 {
1979         struct lpfc_sli *psli;
1980         struct lpfc_sli_ring *pring;
1981         uint16_t cfg_value;
1982         int i;
1983
1984         psli = &phba->sli;
1985
1986         /* Reset HBA */
1987         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1988                         "0325 Reset HBA Data: x%x x%x\n",
1989                         phba->pport->port_state, psli->sli_flag);
1990
1991         /* perform board reset */
1992         phba->fc_eventTag = 0;
1993         phba->pport->fc_myDID = 0;
1994         phba->pport->fc_prevDID = 0;
1995
1996         /* Turn off parity checking and serr during the physical reset */
1997         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
1998         pci_write_config_word(phba->pcidev, PCI_COMMAND,
1999                               (cfg_value &
2000                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2001
2002         psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
2003         /* Now toggle INITFF bit in the Host Control Register */
2004         writel(HC_INITFF, phba->HCregaddr);
2005         mdelay(1);
2006         readl(phba->HCregaddr); /* flush */
2007         writel(0, phba->HCregaddr);
2008         readl(phba->HCregaddr); /* flush */
2009
2010         /* Restore PCI cmd register */
2011         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
2012
2013         /* Initialize relevant SLI info */
2014         for (i = 0; i < psli->num_rings; i++) {
2015                 pring = &psli->ring[i];
2016                 pring->flag = 0;
2017                 pring->rspidx = 0;
2018                 pring->next_cmdidx  = 0;
2019                 pring->local_getidx = 0;
2020                 pring->cmdidx = 0;
2021                 pring->missbufcnt = 0;
2022         }
2023
2024         phba->link_state = LPFC_WARM_START;
2025         return 0;
2026 }
2027
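/*
 * Restart the HBA.  The reset barrier is raised, a MBX_RESTART mailbox
 * word is written to SLIM (word1 indicates whether POST may be skipped,
 * based on port_state), the board is reset and the link statistics are
 * cleared.  After a short delay any outstanding I/O is flushed via
 * lpfc_hba_down_post().
 */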
2028 int
2029 lpfc_sli_brdrestart(struct lpfc_hba *phba)
2030 {
2031         MAILBOX_t *mb;
2032         struct lpfc_sli *psli;
2033         uint16_t skip_post;
2034         volatile uint32_t word0;
2035         void __iomem *to_slim;
2036
2037         spin_lock_irq(&phba->hbalock);
2038
2039         psli = &phba->sli;
2040
2041         /* Restart HBA */
2042         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2043                         "0337 Restart HBA Data: x%x x%x\n",
2044                         phba->pport->port_state, psli->sli_flag);
2045
2046         word0 = 0;
2047         mb = (MAILBOX_t *) &word0;
2048         mb->mbxCommand = MBX_RESTART;
2049         mb->mbxHc = 1;
2050
2051         lpfc_reset_barrier(phba);
2052
2053         to_slim = phba->MBslimaddr;
2054         writel(*(uint32_t *) mb, to_slim);
2055         readl(to_slim); /* flush */
2056
2057         /* Only skip post after fc_ffinit is completed */
2058         if (phba->pport->port_state) {
2059                 skip_post = 1;
2060                 word0 = 1;      /* This is really setting up word1 */
2061         } else {
2062                 skip_post = 0;
2063                 word0 = 0;      /* This is really setting up word1 */
2064         }
2065         to_slim = phba->MBslimaddr + sizeof (uint32_t);
2066         writel(*(uint32_t *) mb, to_slim);
2067         readl(to_slim); /* flush */
2068
2069         lpfc_sli_brdreset(phba);
2070         phba->pport->stopped = 0;
2071         phba->link_state = LPFC_INIT_START;
2072
2073         spin_unlock_irq(&phba->hbalock);
2074
2075         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
2076         psli->stats_start = get_seconds();
2077
2078         if (skip_post)
2079                 mdelay(100);
2080         else
2081                 mdelay(2000);
2082
2083         lpfc_hba_down_post(phba);
2084
2085         return 0;
2086 }
2087
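/*
 * Wait for the chipset to finish power-on initialization by polling the
 * Host Status register for HS_FFRDY and HS_MBRDY.  The poll interval
 * backs off and the board is restarted once before declaring a timeout.
 * On success all interrupt enables are cleared and the Host Attention
 * register is acked.  Returns 0, -ETIMEDOUT or -EIO.
 */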
2088 static int
2089 lpfc_sli_chipset_init(struct lpfc_hba *phba)
2090 {
2091         uint32_t status, i = 0;
2092
2093         /* Read the HBA Host Status Register */
2094         status = readl(phba->HSregaddr);
2095
2096         /* Check status register to see what current state is */
2097         i = 0;
2098         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
2099
2100                 /* Check every 100ms for 5 retries, then every 500ms for 5, then
2101                  * every 2.5 sec for 5, then reset board and every 2.5 sec for
2102                  * 4.
2103                  */
2104                 if (i++ >= 20) {
2105                         /* Adapter failed to init, timeout, status reg
2106                            <status> */
2107                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2108                                         "0436 Adapter failed to init, "
2109                                         "timeout, status reg x%x\n", status);
2110                         phba->link_state = LPFC_HBA_ERROR;
2111                         return -ETIMEDOUT;
2112                 }
2113
2114                 /* Check to see if any errors occurred during init */
2115                 if (status & HS_FFERM) {
2116                         /* ERROR: During chipset initialization */
2117                         /* Adapter failed to init, chipset, status reg
2118                            <status> */
2119                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2120                                         "0437 Adapter failed to init, "
2121                                         "chipset, status reg x%x\n", status);
2122                         phba->link_state = LPFC_HBA_ERROR;
2123                         return -EIO;
2124                 }
2125
2126                 if (i <= 5) {
2127                         msleep(10);
2128                 } else if (i <= 10) {
2129                         msleep(500);
2130                 } else {
2131                         msleep(2500);
2132                 }
2133
2134                 if (i == 15) {
2135                                 /* Do post */
2136                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2137                         lpfc_sli_brdrestart(phba);
2138                 }
2139                 /* Read the HBA Host Status Register */
2140                 status = readl(phba->HSregaddr);
2141         }
2142
2143         /* Check to see if any errors occurred during init */
2144         if (status & HS_FFERM) {
2145                 /* ERROR: During chipset initialization */
2146                 /* Adapter failed to init, chipset, status reg <status> */
2147                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2148                                 "0438 Adapter failed to init, chipset, "
2149                                 "status reg x%x\n", status);
2150                 phba->link_state = LPFC_HBA_ERROR;
2151                 return -EIO;
2152         }
2153
2154         /* Clear all interrupt enable conditions */
2155         writel(0, phba->HCregaddr);
2156         readl(phba->HCregaddr); /* flush */
2157
2158         /* setup host attn register */
2159         writel(0xffffffff, phba->HAregaddr);
2160         readl(phba->HAregaddr); /* flush */
2161         return 0;
2162 }
2163
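/*
 * Helpers describing the host buffer queue (HBQ) layout: the number of
 * HBQs configured in lpfc_hbq_defs, the total number of entries across
 * them, and the resulting size in bytes of the HBQ region.
 */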
2164 int
2165 lpfc_sli_hbq_count(void)
2166 {
2167         return ARRAY_SIZE(lpfc_hbq_defs);
2168 }
2169
2170 static int
2171 lpfc_sli_hbq_entry_count(void)
2172 {
2173         int  hbq_count = lpfc_sli_hbq_count();
2174         int  count = 0;
2175         int  i;
2176
2177         for (i = 0; i < hbq_count; ++i)
2178                 count += lpfc_hbq_defs[i]->entry_count;
2179         return count;
2180 }
2181
2182 int
2183 lpfc_sli_hbq_size(void)
2184 {
2185         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
2186 }
2187
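/*
 * Configure each host buffer queue on the HBA.  A config-HBQ mailbox
 * command is issued per HBQ with its slot in the shared HBQ region, the
 * local get/put indices are reset, and each HBQ is then populated with
 * an initial set of buffers.  Returns 0 on success.
 */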
2188 static int
2189 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2190 {
2191         int  hbq_count = lpfc_sli_hbq_count();
2192         LPFC_MBOXQ_t *pmb;
2193         MAILBOX_t *pmbox;
2194         uint32_t hbqno;
2195         uint32_t hbq_entry_index;
2196
2197                                 /* Get a Mailbox buffer to set up mailbox
2198                                  * commands for HBA initialization
2199                                  */
2200         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2201
2202         if (!pmb)
2203                 return -ENOMEM;
2204
2205         pmbox = &pmb->mb;
2206
2207         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2208         phba->link_state = LPFC_INIT_MBX_CMDS;
2209
2210         hbq_entry_index = 0;
2211         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2212                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
2213                 phba->hbqs[hbqno].hbqPutIdx      = 0;
2214                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
2215                 phba->hbqs[hbqno].entry_count =
2216                         lpfc_hbq_defs[hbqno]->entry_count;
2217                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
2218                         hbq_entry_index, pmb);
2219                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2220
2221                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2222                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2223                            mbxStatus <status>, ring <num> */
2224
2225                         lpfc_printf_log(phba, KERN_ERR,
2226                                         LOG_SLI | LOG_VPORT,
2227                                         "1805 Adapter failed to init. "
2228                                         "Data: x%x x%x x%x\n",
2229                                         pmbox->mbxCommand,
2230                                         pmbox->mbxStatus, hbqno);
2231
2232                         phba->link_state = LPFC_HBA_ERROR;
2233                         mempool_free(pmb, phba->mbox_mem_pool);
2234                         return -ENXIO;
2235                 }
2236         }
2237         phba->hbq_count = hbq_count;
2238
2239         mempool_free(pmb, phba->mbox_mem_pool);
2240
2241         /* Initially populate or replenish the HBQs */
2242         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2243                 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2244                         return -ENOMEM;
2245         }
2246         return 0;
2247 }
2248
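/*
 * Bring the port up in the requested SLI mode.  The HBA is restarted and
 * re-initialized (at most twice) and a CONFIG_PORT mailbox command is
 * issued in polled mode.  On success max_vpi is taken from the mailbox
 * response when the gmv bit is set; an SLI-3 response without the cMA
 * bit set is treated as an error.
 */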
2249 static int
2250 lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2251 {
2252         LPFC_MBOXQ_t *pmb;
2253         uint32_t resetcount = 0, rc = 0, done = 0;
2254
2255         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2256         if (!pmb) {
2257                 phba->link_state = LPFC_HBA_ERROR;
2258                 return -ENOMEM;
2259         }
2260
2261         phba->sli_rev = sli_mode;
2262         while (resetcount < 2 && !done) {
2263                 spin_lock_irq(&phba->hbalock);
2264                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2265                 spin_unlock_irq(&phba->hbalock);
2266                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2267                 lpfc_sli_brdrestart(phba);
2268                 msleep(2500);
2269                 rc = lpfc_sli_chipset_init(phba);
2270                 if (rc)
2271                         break;
2272
2273                 spin_lock_irq(&phba->hbalock);
2274                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2275                 spin_unlock_irq(&phba->hbalock);
2276                 resetcount++;
2277
2278                 /* Call pre CONFIG_PORT mailbox command initialization.  A
2279                  * value of 0 means the call was successful.  Any other
2280                  * nonzero value is a failure, but if ERESTART is returned,
2281                  * the driver may reset the HBA and try again.
2282                  */
2283                 rc = lpfc_config_port_prep(phba);
2284                 if (rc == -ERESTART) {
2285                         phba->link_state = LPFC_LINK_UNKNOWN;
2286                         continue;
2287                 } else if (rc) {
2288                         break;
2289                 }
2290
2291                 phba->link_state = LPFC_INIT_MBX_CMDS;
2292                 lpfc_config_port(phba, pmb);
2293                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2294                 if (rc != MBX_SUCCESS) {
2295                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2296                                 "0442 Adapter failed to init, mbxCmd x%x "
2297                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
2298                                 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
2299                         spin_lock_irq(&phba->hbalock);
2300                         phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2301                         spin_unlock_irq(&phba->hbalock);
2302                         rc = -ENXIO;
2303                 } else {
2304                         done = 1;
2305                         phba->max_vpi = (phba->max_vpi &&
2306                                          pmb->mb.un.varCfgPort.gmv) != 0
2307                                 ? pmb->mb.un.varCfgPort.max_vpi
2308                                 : 0;
2309                 }
2310         }
2311
2312         if (!done) {
2313                 rc = -EINVAL;
2314                 goto do_prep_failed;
2315         }
2316
2317         if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2318                 (!pmb->mb.un.varCfgPort.cMA)) {
2319                 rc = -ENXIO;
2320                 goto do_prep_failed;
2321         }
2322         return rc;
2323
2324 do_prep_failed:
2325         mempool_free(pmb, phba->mbox_mem_pool);
2326         return rc;
2327 }
2328
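/*
 * Main SLI initialization entry point.  The lpfc_sli_mode module
 * parameter selects SLI-2 or SLI-3 (auto falls back from SLI-3 to SLI-2
 * when the adapter does not support it), iocb sizes and sli3_options are
 * set accordingly, the rings are mapped, HBQs are set up when enabled,
 * and CONFIG_PORT post-processing completes the bring-up.
 */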
2329 int
2330 lpfc_sli_hba_setup(struct lpfc_hba *phba)
2331 {
2332         uint32_t rc;
2333         int  mode = 3;
2334
2335         switch (lpfc_sli_mode) {
2336         case 2:
2337                 if (phba->cfg_enable_npiv) {
2338                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2339                                 "1824 NPIV enabled: Override lpfc_sli_mode "
2340                                 "parameter (%d) to auto (0).\n",
2341                                 lpfc_sli_mode);
2342                         break;
2343                 }
2344                 mode = 2;
2345                 break;
2346         case 0:
2347         case 3:
2348                 break;
2349         default:
2350                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2351                                 "1819 Unrecognized lpfc_sli_mode "
2352                                 "parameter: %d.\n", lpfc_sli_mode);
2353
2354                 break;
2355         }
2356
2357         rc = lpfc_do_config_port(phba, mode);
2358         if (rc && lpfc_sli_mode == 3)
2359                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2360                                 "1820 Unable to select SLI-3.  "
2361                                 "Not supported by adapter.\n");
2362         if (rc && mode != 2)
2363                 rc = lpfc_do_config_port(phba, 2);
2364         if (rc)
2365                 goto lpfc_sli_hba_setup_error;
2366
2367         if (phba->sli_rev == 3) {
2368                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2369                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2370                 phba->sli3_options |= LPFC_SLI3_ENABLED;
2371                 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2372
2373         } else {
2374                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2375                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2376                 phba->sli3_options = 0;
2377         }
2378
2379         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2380                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
2381                         phba->sli_rev, phba->max_vpi);
2382         rc = lpfc_sli_ring_map(phba);
2383
2384         if (rc)
2385                 goto lpfc_sli_hba_setup_error;
2386
2387                                 /* Init HBQs */
2388
2389         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2390                 rc = lpfc_sli_hbq_setup(phba);
2391                 if (rc)
2392                         goto lpfc_sli_hba_setup_error;
2393         }
2394
2395         phba->sli.sli_flag |= LPFC_PROCESS_LA;
2396
2397         rc = lpfc_config_port_post(phba);
2398         if (rc)
2399                 goto lpfc_sli_hba_setup_error;
2400
2401         return rc;
2402
2403 lpfc_sli_hba_setup_error:
2404         phba->link_state = LPFC_HBA_ERROR;
2405         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2406                         "0445 Firmware initialization failed\n");
2407         return rc;
2408 }
2409
2410 /*! lpfc_mbox_timeout
2411  *
2412  * \pre
2413  * \post
2414  * \param ptr Unsigned long cast of the pointer to the driver's
2415  *            per-adapter struct lpfc_hba.
2416  * \return
2417  *   void
2418  *
2419  * \b Description:
2420  *
2421  * This routine handles mailbox timeout events in timer interrupt context.
2422  */
2423 void
2424 lpfc_mbox_timeout(unsigned long ptr)
2425 {
2426         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
2427         unsigned long iflag;
2428         uint32_t tmo_posted;
2429
2430         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2431         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2432         if (!tmo_posted)
2433                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2434         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2435
2436         if (!tmo_posted) {
2437                 spin_lock_irqsave(&phba->hbalock, iflag);
2438                 if (phba->work_wait)
2439                         lpfc_worker_wake_up(phba);
2440                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2441         }
2442 }
2443
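/*
 * Worker-thread handler for a mailbox command timeout.  The link state
 * is set to unknown and SLI-2 mode is cleared so outstanding FCP I/O is
 * failed back, then the adapter is taken offline, restarted and brought
 * back online to recover from the hung mailbox.
 */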
2444 void
2445 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2446 {
2447         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2448         MAILBOX_t *mb = &pmbox->mb;
2449         struct lpfc_sli *psli = &phba->sli;
2450         struct lpfc_sli_ring *pring;
2451
2452         if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
2453                 return;
2454         }
2455
2456         /* Mbox cmd <mbxCommand> timeout */
2457         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2458                         "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
2459                         mb->mbxCommand,
2460                         phba->pport->port_state,
2461                         phba->sli.sli_flag,
2462                         phba->sli.mbox_active);
2463
2464         /* Setting the link state to unknown so lpfc_sli_abort_iocb_ring
2465          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2466          * it to fail all outstanding SCSI IO.
2467          */
2468         spin_lock_irq(&phba->pport->work_port_lock);
2469         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2470         spin_unlock_irq(&phba->pport->work_port_lock);
2471         spin_lock_irq(&phba->hbalock);
2472         phba->link_state = LPFC_LINK_UNKNOWN;
2473         phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2474         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2475         spin_unlock_irq(&phba->hbalock);
2476
2477         pring = &psli->ring[psli->fcp_ring];
2478         lpfc_sli_abort_iocb_ring(phba, pring);
2479
2480         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2481                         "0316 Resetting board due to mailbox timeout\n");
2482         /*
2483          * lpfc_offline calls lpfc_sli_hba_down which will clean up
2484          * any outstanding mailbox commands.
2485          */
2486         lpfc_offline_prep(phba);
2487         lpfc_offline(phba);
2488         lpfc_sli_brdrestart(phba);
2489         if (lpfc_online(phba) == 0)             /* Initialize the HBA */
2490                 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2491         lpfc_unblock_mgmt_io(phba);
2492         return;
2493 }
2494
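/*
 * Issue a mailbox command to the HBA.  With MBX_NOWAIT the command is
 * either queued (MBX_BUSY) or started and completed later from the
 * mailbox attention; with MBX_POLL the routine spins on the mailbox
 * owner bit and the HA_MBATT attention until the command finishes or
 * the per-command timeout expires.  The command is copied either to the
 * host SLIM area (SLI-2 active) or directly to HBA SLIM.
 */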
2495 int
2496 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2497 {
2498         MAILBOX_t *mb;
2499         struct lpfc_sli *psli = &phba->sli;
2500         uint32_t status, evtctr;
2501         uint32_t ha_copy;
2502         int i;
2503         unsigned long drvr_flag = 0;
2504         volatile uint32_t word0, ldata;
2505         void __iomem *to_slim;
2506
2507         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2508                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2509                 if(!pmbox->vport) {
2510                         lpfc_printf_log(phba, KERN_ERR,
2511                                         LOG_MBOX | LOG_VPORT,
2512                                         "1806 Mbox x%x failed. No vport\n",
2513                                         pmbox->mb.mbxCommand);
2514                         dump_stack();
2515                         return MBXERR_ERROR;
2516                 }
2517         }
2518
2519
2520         /* If the PCI channel is in offline state, do not post mbox. */
2521         if (unlikely(pci_channel_offline(phba->pcidev)))
2522                 return MBX_NOT_FINISHED;
2523
2524         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2525         psli = &phba->sli;
2526
2527
2528         mb = &pmbox->mb;
2529         status = MBX_SUCCESS;
2530
2531         if (phba->link_state == LPFC_HBA_ERROR) {
2532                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2533
2534                 /* Mbox command <mbxCommand> cannot issue */
2535                 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2536                 return MBX_NOT_FINISHED;
2537         }
2538
2539         if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2540             !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2541                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2542                 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2543                 return MBX_NOT_FINISHED;
2544         }
2545
2546         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2547                 /* Polling for a mbox command when another one is already active
2548                  * is not allowed in SLI. Also, the driver must have established
2549                  * SLI2 mode to queue and process multiple mbox commands.
2550                  */
2551
2552                 if (flag & MBX_POLL) {
2553                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2554
2555                         /* Mbox command <mbxCommand> cannot issue */
2556                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2557                         return MBX_NOT_FINISHED;
2558                 }
2559
2560                 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2561                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2562                         /* Mbox command <mbxCommand> cannot issue */
2563                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2564                         return MBX_NOT_FINISHED;
2565                 }
2566
2567                 /* Another mailbox command is still being processed, queue this
2568                  * command to be processed later.
2569                  */
2570                 lpfc_mbox_put(phba, pmbox);
2571
2572                 /* Mbox cmd issue - BUSY */
2573                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2574                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
2575                                 "x%x x%x x%x x%x\n",
2576                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2577                                 mb->mbxCommand, phba->pport->port_state,
2578                                 psli->sli_flag, flag);
2579
2580                 psli->slistat.mbox_busy++;
2581                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2582
2583                 if (pmbox->vport) {
2584                         lpfc_debugfs_disc_trc(pmbox->vport,
2585                                 LPFC_DISC_TRC_MBOX_VPORT,
2586                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
2587                                 (uint32_t)mb->mbxCommand,
2588                                 mb->un.varWords[0], mb->un.varWords[1]);
2589                 }
2590                 else {
2591                         lpfc_debugfs_disc_trc(phba->pport,
2592                                 LPFC_DISC_TRC_MBOX,
2593                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
2594                                 (uint32_t)mb->mbxCommand,
2595                                 mb->un.varWords[0], mb->un.varWords[1]);
2596                 }
2597
2598                 return MBX_BUSY;
2599         }
2600
2601         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2602
2603         /* If we are not polling, we MUST be in SLI2 mode */
2604         if (flag != MBX_POLL) {
2605                 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2606                     (mb->mbxCommand != MBX_KILL_BOARD)) {
2607                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2608                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2609                         /* Mbox command <mbxCommand> cannot issue */
2610                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2611                         return MBX_NOT_FINISHED;
2612                 }
2613                 /* timeout active mbox command */
2614                 mod_timer(&psli->mbox_tmo, (jiffies +
2615                                (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
2616         }
2617
2618         /* Mailbox cmd <cmd> issue */
2619         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2620                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
2621                         "x%x\n",
2622                         pmbox->vport ? pmbox->vport->vpi : 0,
2623                         mb->mbxCommand, phba->pport->port_state,
2624                         psli->sli_flag, flag);
2625
2626         if (mb->mbxCommand != MBX_HEARTBEAT) {
2627                 if (pmbox->vport) {
2628                         lpfc_debugfs_disc_trc(pmbox->vport,
2629                                 LPFC_DISC_TRC_MBOX_VPORT,
2630                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
2631                                 (uint32_t)mb->mbxCommand,
2632                                 mb->un.varWords[0], mb->un.varWords[1]);
2633                 }
2634                 else {
2635                         lpfc_debugfs_disc_trc(phba->pport,
2636                                 LPFC_DISC_TRC_MBOX,
2637                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
2638                                 (uint32_t)mb->mbxCommand,
2639                                 mb->un.varWords[0], mb->un.varWords[1]);
2640                 }
2641         }
2642
2643         psli->slistat.mbox_cmd++;
2644         evtctr = psli->slistat.mbox_event;
2645
2646         /* next set own bit for the adapter and copy over command word */
2647         mb->mbxOwner = OWN_CHIP;
2648
2649         if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2650                 /* First copy command data to host SLIM area */
2651                 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
2652         } else {
2653                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2654                         /* copy command data into host mbox for cmpl */
2655                         lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2656                                               MAILBOX_CMD_SIZE);
2657                 }
2658
2659                 /* First copy mbox command data to HBA SLIM, skip past first
2660                    word */
2661                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2662                 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
2663                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
2664
2665                 /* Next copy over first word, with mbxOwner set */
2666                 ldata = *((volatile uint32_t *)mb);
2667                 to_slim = phba->MBslimaddr;
2668                 writel(ldata, to_slim);
2669                 readl(to_slim); /* flush */
2670
2671                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2672                         /* switch over to host mailbox */
2673                         psli->sli_flag |= LPFC_SLI2_ACTIVE;
2674                 }
2675         }
2676
2677         wmb();
2678         /* Interrupt the board to do it right away */
2679         writel(CA_MBATT, phba->CAregaddr);
2680         readl(phba->CAregaddr); /* flush */
2681
2682         switch (flag) {
2683         case MBX_NOWAIT:
2684                 /* Don't wait for it to finish, just return */
2685                 psli->mbox_active = pmbox;
2686                 break;
2687
2688         case MBX_POLL:
2689                 psli->mbox_active = NULL;
2690                 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2691                         /* First read mbox status word */
2692                         word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
2693                         word0 = le32_to_cpu(word0);
2694                 } else {
2695                         /* First read mbox status word */
2696                         word0 = readl(phba->MBslimaddr);
2697                 }
2698
2699                 /* Read the HBA Host Attention Register */
2700                 ha_copy = readl(phba->HAregaddr);
2701
2702                 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2703                 i *= 1000; /* Convert to ms */
2704
2705                 /* Wait for command to complete */
2706                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2707                        (!(ha_copy & HA_MBATT) &&
2708                         (phba->link_state > LPFC_WARM_START))) {
2709                         if (i-- <= 0) {
2710                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2711                                 spin_unlock_irqrestore(&phba->hbalock,
2712                                                        drvr_flag);
2713                                 return MBX_NOT_FINISHED;
2714                         }
2715
2716                         /* Check if we took a mbox interrupt while we were
2717                            polling */
2718                         if (((word0 & OWN_CHIP) != OWN_CHIP)
2719                             && (evtctr != psli->slistat.mbox_event))
2720                                 break;
2721
2722                         spin_unlock_irqrestore(&phba->hbalock,
2723                                                drvr_flag);
2724
2725                         msleep(1);
2726
2727                         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2728
2729                         if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2730                                 /* Re-read mbox status word */
2731                                 word0 = *((volatile uint32_t *)
2732                                                 &phba->slim2p->mbx);
2733                                 word0 = le32_to_cpu(word0);
2734                                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2735                                         MAILBOX_t *slimmb;
2736                                         volatile uint32_t slimword0;
2737                                         /* Check real SLIM for any errors */
2738                                         slimword0 = readl(phba->MBslimaddr);
2739                                         slimmb = (MAILBOX_t *) &slimword0;
2740                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2741                                             && slimmb->mbxStatus) {
2742                                                 psli->sli_flag &=
2743                                                     ~LPFC_SLI2_ACTIVE;
2744                                                 word0 = slimword0;
2745                                         }
2746                                 }
2747                         } else {
2748                                 /* Re-read mbox status word */
2749                                 word0 = readl(phba->MBslimaddr);
2750                         }
2751                         /* Read the HBA Host Attention Register */
2752                         ha_copy = readl(phba->HAregaddr);
2753                 }
2754
2755                 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2756                         /* copy results back to user */
2757                         lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2758                                               MAILBOX_CMD_SIZE);
2759                 } else {
2760                         /* Copy results back from HBA SLIM */
2761                         lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2762                                                         MAILBOX_CMD_SIZE);
2763                         if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2764                                 pmbox->context2) {
2765                                 lpfc_memcpy_from_slim((void *)pmbox->context2,
2766                                       phba->MBslimaddr + DMP_RSP_OFFSET,
2767                                                       mb->un.varDmp.word_cnt);
2768                         }
2769                 }
2770
2771                 writel(HA_MBATT, phba->HAregaddr);
2772                 readl(phba->HAregaddr); /* flush */
2773
2774                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2775                 status = mb->mbxStatus;
2776         }
2777
2778         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2779         return status;
2780 }
2781
2782 /*
2783  * Caller needs to hold lock.
2784  */
2785 static void
2786 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2787                     struct lpfc_iocbq *piocb)
2788 {
2789         /* Insert the caller's iocb in the txq tail for later processing. */
2790         list_add_tail(&piocb->list, &pring->txq);
2791         pring->txq_cnt++;
2792 }
2793
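/*
 * Return the next iocb to post: prefer one already queued on the ring's
 * txq; if the txq is empty, consume the caller's iocb and clear the
 * caller's pointer so it is not re-queued.
 */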
2794 static struct lpfc_iocbq *
2795 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2796                    struct lpfc_iocbq **piocb)
2797 {
2798         struct lpfc_iocbq * nextiocb;
2799
2800         nextiocb = lpfc_sli_ringtx_get(phba, pring);
2801         if (!nextiocb) {
2802                 nextiocb = *piocb;
2803                 *piocb = NULL;
2804         }
2805
2806         return nextiocb;
2807 }
2808
2809 /*
2810  * Lockless version of lpfc_sli_issue_iocb.
2811  */
2812 static int
2813 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2814                     struct lpfc_iocbq *piocb, uint32_t flag)
2815 {
2816         struct lpfc_iocbq *nextiocb;
2817         IOCB_t *iocb;
2818
2819         if (piocb->iocb_cmpl && (!piocb->vport) &&
2820            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2821            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2822                 lpfc_printf_log(phba, KERN_ERR,
2823                                 LOG_SLI | LOG_VPORT,
2824                                 "1807 IOCB x%x failed. No vport\n",
2825                                 piocb->iocb.ulpCommand);
2826                 dump_stack();
2827                 return IOCB_ERROR;
2828         }
2829
2830
2831         /* If the PCI channel is in offline state, do not post iocbs. */
2832         if (unlikely(pci_channel_offline(phba->pcidev)))
2833                 return IOCB_ERROR;
2834
2835         /*
2836          * We should never get an IOCB if we are in a < LINK_DOWN state
2837          */
2838         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
2839                 return IOCB_ERROR;
2840
2841         /*
2842          * Check to see if we are blocking IOCB processing because of an
2843          * outstanding event.
2844          */
2845         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
2846                 goto iocb_busy;
2847
2848         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
2849                 /*
2850                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
2851                  * can be issued if the link is not up.
2852                  */
2853                 switch (piocb->iocb.ulpCommand) {
2854                 case CMD_QUE_RING_BUF_CN:
2855                 case CMD_QUE_RING_BUF64_CN:
2856                         /*
2857                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2858                          * completion, iocb_cmpl MUST be 0.
2859                          */
2860                         if (piocb->iocb_cmpl)
2861                                 piocb->iocb_cmpl = NULL;
2862                         /*FALLTHROUGH*/
2863                 case CMD_CREATE_XRI_CR:
2864                 case CMD_CLOSE_XRI_CN:
2865                 case CMD_CLOSE_XRI_CX:
2866                         break;
2867                 default:
2868                         goto iocb_busy;
2869                 }
2870
2871         /*
2872          * For FCP commands, we must be in a state where we can process link
2873          * attention events.
2874          */
2875         } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2876                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
2877                 goto iocb_busy;
2878         }
2879
2880         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2881                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2882                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2883
2884         if (iocb)
2885                 lpfc_sli_update_ring(phba, pring);
2886         else
2887                 lpfc_sli_update_full_ring(phba, pring);
2888
2889         if (!piocb)
2890                 return IOCB_SUCCESS;
2891
2892         goto out_busy;
2893
2894  iocb_busy:
2895         pring->stats.iocb_cmd_delay++;
2896
2897  out_busy:
2898
2899         if (!(flag & SLI_IOCB_RET_IOCB)) {
2900                 __lpfc_sli_ringtx_put(phba, pring, piocb);
2901                 return IOCB_SUCCESS;
2902         }
2903
2904         return IOCB_BUSY;
2905 }
2906
2907
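/*
 * Locking wrapper around __lpfc_sli_issue_iocb; takes and releases hbalock.
 */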
2908 int
2909 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2910                     struct lpfc_iocbq *piocb, uint32_t flag)
2911 {
2912         unsigned long iflags;
2913         int rc;
2914
2915         spin_lock_irqsave(&phba->hbalock, iflags);
2916         rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2917         spin_unlock_irqrestore(&phba->hbalock, iflags);
2918
2919         return rc;
2920 }
2921
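/*
 * Move some cmd/rsp iocb entries from the FCP ring to the extra ring and
 * configure the extra ring's single unsolicited-event mask from the
 * multi_ring rctl/type config parameters.
 */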
2922 static int
2923 lpfc_extra_ring_setup(struct lpfc_hba *phba)
2924 {
2925         struct lpfc_sli *psli;
2926         struct lpfc_sli_ring *pring;
2927
2928         psli = &phba->sli;
2929
2930         /* Adjust cmd/rsp ring iocb entries more evenly */
2931
2932         /* Take some away from the FCP ring */
2933         pring = &psli->ring[psli->fcp_ring];
2934         pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2935         pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2936         pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2937         pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2938
2939         /* and give them to the extra ring */
2940         pring = &psli->ring[psli->extra_ring];
2941
2942         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2943         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2944         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2945         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2946
2947         /* Setup default profile for this ring */
2948         pring->iotag_max = 4096;
2949         pring->num_mask = 1;
2950         pring->prt[0].profile = 0;      /* Mask 0 */
2951         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2952         pring->prt[0].type = phba->cfg_multi_ring_type;
2953         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2954         return 0;
2955 }
2956
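/*
 * Handle an ASYNC_STATUS iocb.  Only temperature warning/safe events are
 * expected; log the condition and post a vendor-unique event to the FC
 * transport so applications see the temperature change.
 */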
2957 static void
2958 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
2959         struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
2960 {
2961         IOCB_t *icmd;
2962         uint16_t evt_code;
2963         uint16_t temp;
2964         struct temp_event temp_event_data;
2965         struct Scsi_Host *shost;
2966
2967         icmd = &iocbq->iocb;
2968         evt_code = icmd->un.asyncstat.evt_code;
2969         temp = icmd->ulpContext;
2970
2971         if ((evt_code != ASYNC_TEMP_WARN) &&
2972                 (evt_code != ASYNC_TEMP_SAFE)) {
2973                 lpfc_printf_log(phba,
2974                         KERN_ERR,
2975                         LOG_SLI,
2976                         "0327 Ring %d handler: unexpected ASYNC_STATUS"
2977                         " evt_code 0x%x\n",
2978                         pring->ringno,
2979                         icmd->un.asyncstat.evt_code);
2980                 return;
2981         }
2982         temp_event_data.data = (uint32_t)temp;
2983         temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2984         if (evt_code == ASYNC_TEMP_WARN) {
2985                 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
2986                 lpfc_printf_log(phba,
2987                                 KERN_WARNING,
2988                                 LOG_TEMP,
2989                                 "0339 Adapter is very hot, please take "
2990                                 "corrective action. temperature: %d Celsius\n",
2991                                 temp);
2992         }
2993         if (evt_code == ASYNC_TEMP_SAFE) {
2994                 temp_event_data.event_code = LPFC_NORMAL_TEMP;
2995                 lpfc_printf_log(phba,
2996                                 KERN_INFO,
2997                                 LOG_TEMP,
2998                                 "0340 Adapter temperature is OK now. "
2999                                 "temperature: %d Celsius\n",
3000                                 temp);
3001         }
3002
3003         /* Send temperature change event to applications */
3004         shost = lpfc_shost_from_vport(phba->pport);
3005         fc_host_post_vendor_event(shost, fc_get_event_number(),
3006                 sizeof(temp_event_data), (char *) &temp_event_data,
3007                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
3008
3009 }
3010
3011
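/*
 * Set up default iocb counts, iocb sizes and unsolicited-event masks for
 * the FCP, extra and ELS/CT rings, and check that the totals fit in the
 * SLI-2 SLIM.
 */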
3012 int
3013 lpfc_sli_setup(struct lpfc_hba *phba)
3014 {
3015         int i, totiocbsize = 0;
3016         struct lpfc_sli *psli = &phba->sli;
3017         struct lpfc_sli_ring *pring;
3018
3019         psli->num_rings = MAX_CONFIGURED_RINGS;
3020         psli->sli_flag = 0;
3021         psli->fcp_ring = LPFC_FCP_RING;
3022         psli->next_ring = LPFC_FCP_NEXT_RING;
3023         psli->extra_ring = LPFC_EXTRA_RING;
3024
3025         psli->iocbq_lookup = NULL;
3026         psli->iocbq_lookup_len = 0;
3027         psli->last_iotag = 0;
3028
3029         for (i = 0; i < psli->num_rings; i++) {
3030                 pring = &psli->ring[i];
3031                 switch (i) {
3032                 case LPFC_FCP_RING:     /* ring 0 - FCP */
3033                         /* numCiocb and numRiocb are used in config_port */
3034                         pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
3035                         pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
3036                         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
3037                         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
3038                         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
3039                         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
3040                         pring->sizeCiocb = (phba->sli_rev == 3) ?
3041                                                         SLI3_IOCB_CMD_SIZE :
3042                                                         SLI2_IOCB_CMD_SIZE;
3043                         pring->sizeRiocb = (phba->sli_rev == 3) ?
3044                                                         SLI3_IOCB_RSP_SIZE :
3045                                                         SLI2_IOCB_RSP_SIZE;
3046                         pring->iotag_ctr = 0;
3047                         pring->iotag_max =
3048                             (phba->cfg_hba_queue_depth * 2);
3049                         pring->fast_iotag = pring->iotag_max;
3050                         pring->num_mask = 0;
3051                         break;
3052                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
3053                         /* numCiocb and numRiocb are used in config_port */
3054                         pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
3055                         pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
3056                         pring->sizeCiocb = (phba->sli_rev == 3) ?
3057                                                         SLI3_IOCB_CMD_SIZE :
3058                                                         SLI2_IOCB_CMD_SIZE;
3059                         pring->sizeRiocb = (phba->sli_rev == 3) ?
3060                                                         SLI3_IOCB_RSP_SIZE :
3061                                                         SLI2_IOCB_RSP_SIZE;
3062                         pring->iotag_max = phba->cfg_hba_queue_depth;
3063                         pring->num_mask = 0;
3064                         break;
3065                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
3066                         /* numCiocb and numRiocb are used in config_port */
3067                         pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
3068                         pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
3069                         pring->sizeCiocb = (phba->sli_rev == 3) ?
3070                                                         SLI3_IOCB_CMD_SIZE :
3071                                                         SLI2_IOCB_CMD_SIZE;
3072                         pring->sizeRiocb = (phba->sli_rev == 3) ?
3073                                                         SLI3_IOCB_RSP_SIZE :
3074                                                         SLI2_IOCB_RSP_SIZE;
3075                         pring->fast_iotag = 0;
3076                         pring->iotag_ctr = 0;
3077                         pring->iotag_max = 4096;
3078                         pring->lpfc_sli_rcv_async_status =
3079                                 lpfc_sli_async_event_handler;
3080                         pring->num_mask = 4;
3081                         pring->prt[0].profile = 0;      /* Mask 0 */
3082                         pring->prt[0].rctl = FC_ELS_REQ;
3083                         pring->prt[0].type = FC_ELS_DATA;
3084                         pring->prt[0].lpfc_sli_rcv_unsol_event =
3085                             lpfc_els_unsol_event;
3086                         pring->prt[1].profile = 0;      /* Mask 1 */
3087                         pring->prt[1].rctl = FC_ELS_RSP;
3088                         pring->prt[1].type = FC_ELS_DATA;
3089                         pring->prt[1].lpfc_sli_rcv_unsol_event =
3090                             lpfc_els_unsol_event;
3091                         pring->prt[2].profile = 0;      /* Mask 2 */
3092                         /* NameServer Inquiry */
3093                         pring->prt[2].rctl = FC_UNSOL_CTL;
3094                         /* NameServer */
3095                         pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
3096                         pring->prt[2].lpfc_sli_rcv_unsol_event =
3097                             lpfc_ct_unsol_event;
3098                         pring->prt[3].profile = 0;      /* Mask 3 */
3099                         /* NameServer response */
3100                         pring->prt[3].rctl = FC_SOL_CTL;
3101                         /* NameServer */
3102                         pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
3103                         pring->prt[3].lpfc_sli_rcv_unsol_event =
3104                             lpfc_ct_unsol_event;
3105                         break;
3106                 }
3107                 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
3108                                 (pring->numRiocb * pring->sizeRiocb);
3109         }
3110         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
3111                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
3112                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
3113                        "SLI2 SLIM Data: x%x x%lx\n",
3114                        phba->brd_no, totiocbsize,
3115                        (unsigned long) MAX_SLIM_IOCB_SIZE);
3116         }
3117         if (phba->cfg_multi_ring_support == 2)
3118                 lpfc_extra_ring_setup(phba);
3119
3120         return 0;
3121 }
3122
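/*
 * Initialize the mailbox queues and the per-ring txq, txcmplq,
 * iocb_continueq and postbufq list heads.
 */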
3123 int
3124 lpfc_sli_queue_setup(struct lpfc_hba *phba)
3125 {
3126         struct lpfc_sli *psli;
3127         struct lpfc_sli_ring *pring;
3128         int i;
3129
3130         psli = &phba->sli;
3131         spin_lock_irq(&phba->hbalock);
3132         INIT_LIST_HEAD(&psli->mboxq);
3133         INIT_LIST_HEAD(&psli->mboxq_cmpl);
3134         /* Initialize list headers for txq and txcmplq as doubly linked lists */
3135         for (i = 0; i < psli->num_rings; i++) {
3136                 pring = &psli->ring[i];
3137                 pring->ringno = i;
3138                 pring->next_cmdidx  = 0;
3139                 pring->local_getidx = 0;
3140                 pring->cmdidx = 0;
3141                 INIT_LIST_HEAD(&pring->txq);
3142                 INIT_LIST_HEAD(&pring->txcmplq);
3143                 INIT_LIST_HEAD(&pring->iocb_continueq);
3144                 INIT_LIST_HEAD(&pring->postbufq);
3145         }
3146         spin_unlock_irq(&phba->hbalock);
3147         return 1;
3148 }
3149
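/*
 * Take SLI processing down for one vport: after cleaning up the vport's
 * discovery resources, fail all of its iocbs still on the txq (never
 * given to the firmware) and issue ABTS for its iocbs on the txcmplq.
 */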
3150 int
3151 lpfc_sli_host_down(struct lpfc_vport *vport)
3152 {
3153         LIST_HEAD(completions);
3154         struct lpfc_hba *phba = vport->phba;
3155         struct lpfc_sli *psli = &phba->sli;
3156         struct lpfc_sli_ring *pring;
3157         struct lpfc_iocbq *iocb, *next_iocb;
3158         int i;
3159         unsigned long flags = 0;
3160         uint16_t prev_pring_flag;
3161
3162         lpfc_cleanup_discovery_resources(vport);
3163
3164         spin_lock_irqsave(&phba->hbalock, flags);
3165         for (i = 0; i < psli->num_rings; i++) {
3166                 pring = &psli->ring[i];
3167                 prev_pring_flag = pring->flag;
3168                 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3169                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
3170                 /*
3171                  * Error everything on the txq since these iocbs have not been
3172                  * given to the FW yet.
3173                  */
3174                 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3175                         if (iocb->vport != vport)
3176                                 continue;
3177                         list_move_tail(&iocb->list, &completions);
3178                         pring->txq_cnt--;
3179                 }
3180
3181                 /* Next issue ABTS for everything on the txcmplq */
3182                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3183                                                                         list) {
3184                         if (iocb->vport != vport)
3185                                 continue;
3186                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3187                 }
3188
3189                 pring->flag = prev_pring_flag;
3190         }
3191
3192         spin_unlock_irqrestore(&phba->hbalock, flags);
3193
3194         while (!list_empty(&completions)) {
3195                 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3196
3197                 if (!iocb->iocb_cmpl)
3198                         lpfc_sli_release_iocbq(phba, iocb);
3199                 else {
3200                         iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3201                         iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
3202                         (iocb->iocb_cmpl) (phba, iocb, iocb);
3203                 }
3204         }
3205         return 1;
3206 }
3207
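/*
 * Take SLI processing down for the whole HBA: fail everything on the
 * txqs and return any active, pending or completed mailbox commands
 * with MBX_NOT_FINISHED status.
 */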
3208 int
3209 lpfc_sli_hba_down(struct lpfc_hba *phba)
3210 {
3211         LIST_HEAD(completions);
3212         struct lpfc_sli *psli = &phba->sli;
3213         struct lpfc_sli_ring *pring;
3214         LPFC_MBOXQ_t *pmb;
3215         struct lpfc_iocbq *iocb;
3216         IOCB_t *cmd = NULL;
3217         int i;
3218         unsigned long flags = 0;
3219
3220         lpfc_hba_down_prep(phba);
3221
3222         lpfc_fabric_abort_hba(phba);
3223
3224         spin_lock_irqsave(&phba->hbalock, flags);
3225         for (i = 0; i < psli->num_rings; i++) {
3226                 pring = &psli->ring[i];
3227                 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3228                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
3229
3230                 /*
3231                  * Error everything on the txq since these iocbs have not been
3232                  * given to the FW yet.
3233                  */
3234                 list_splice_init(&pring->txq, &completions);
3235                 pring->txq_cnt = 0;
3236
3237         }
3238         spin_unlock_irqrestore(&phba->hbalock, flags);
3239
3240         while (!list_empty(&completions)) {
3241                 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3242                 cmd = &iocb->iocb;
3243
3244                 if (!iocb->iocb_cmpl)
3245                         lpfc_sli_release_iocbq(phba, iocb);
3246                 else {
3247                         cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3248                         cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
3249                         (iocb->iocb_cmpl) (phba, iocb, iocb);
3250                 }
3251         }
3252
3253         /* Return any active mbox cmds */
3254         del_timer_sync(&psli->mbox_tmo);
3255         spin_lock_irqsave(&phba->hbalock, flags);
3256
3257         spin_lock(&phba->pport->work_port_lock);
3258         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3259         spin_unlock(&phba->pport->work_port_lock);
3260
3261         if (psli->mbox_active) {
3262                 list_add_tail(&psli->mbox_active->list, &completions);
3263                 psli->mbox_active = NULL;
3264                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3265         }
3266
3267         /* Return any pending or completed mbox cmds */
3268         list_splice_init(&phba->sli.mboxq, &completions);
3269         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3270         INIT_LIST_HEAD(&psli->mboxq);
3271         INIT_LIST_HEAD(&psli->mboxq_cmpl);
3272
3273         spin_unlock_irqrestore(&phba->hbalock, flags);
3274
3275         while (!list_empty(&completions)) {
3276                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
3277                 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3278                 if (pmb->mbox_cmpl) {
3279                         pmb->mbox_cmpl(phba, pmb);
3280                 }
3281         }
3282         return 1;
3283 }
3284
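/*
 * Copy cnt bytes one 32-bit word at a time, converting each word from
 * little endian to CPU byte order.
 */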
3285 void
3286 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
3287 {
3288         uint32_t *src = srcp;
3289         uint32_t *dest = destp;
3290         uint32_t ldata;
3291         int i;
3292
3293         for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
3294                 ldata = *src;
3295                 ldata = le32_to_cpu(ldata);
3296                 *dest = ldata;
3297                 src++;
3298                 dest++;
3299         }
3300 }
3301
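/*
 * Queue a receive buffer on the ring's postbufq so it can be looked up
 * later by its physical address.
 */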
3302 int
3303 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3304                          struct lpfc_dmabuf *mp)
3305 {
3306         /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
3307            later */
3308         spin_lock_irq(&phba->hbalock);
3309         list_add_tail(&mp->list, &pring->postbufq);
3310         pring->postbufq_cnt++;
3311         spin_unlock_irq(&phba->hbalock);
3312         return 0;
3313 }
3314
3315
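/*
 * Find and remove the posted buffer whose DMA address matches phys.
 * Logs an error and returns NULL if no match is found.
 */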
3316 struct lpfc_dmabuf *
3317 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3318                          dma_addr_t phys)
3319 {
3320         struct lpfc_dmabuf *mp, *next_mp;
3321         struct list_head *slp = &pring->postbufq;
3322
3323         /* Search postbufq, from the beginning, looking for a match on phys */
3324         spin_lock_irq(&phba->hbalock);
3325         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3326                 if (mp->phys == phys) {
3327                         list_del_init(&mp->list);
3328                         pring->postbufq_cnt--;
3329                         spin_unlock_irq(&phba->hbalock);
3330                         return mp;
3331                 }
3332         }
3333
3334         spin_unlock_irq(&phba->hbalock);
3335         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3336                         "0410 Cannot find virtual addr for mapped buf on "
3337                         "ring %d Data x%llx x%p x%p x%x\n",
3338                         pring->ringno, (unsigned long long)phys,
3339                         slp->next, slp->prev, pring->postbufq_cnt);
3340         return NULL;
3341 }
3342
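/*
 * Completion handler for ELS abort iocbs.  If the abort request completed
 * with an error status, look up the iocb it was trying to abort and, if it
 * is still marked driver-aborted, complete it here with IOSTAT_LOCAL_REJECT
 * / IOERR_SLI_ABORTED.  Always releases the abort iocbq.
 */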
3343 static void
3344 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3345                         struct lpfc_iocbq *rspiocb)
3346 {
3347         IOCB_t *irsp = &rspiocb->iocb;
3348         uint16_t abort_iotag, abort_context;
3349         struct lpfc_iocbq *abort_iocb;
3350         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3351
3352         abort_iocb = NULL;
3353
3354         if (irsp->ulpStatus) {
3355                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
3356                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
3357
3358                 spin_lock_irq(&phba->hbalock);
3359                 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
3360                         abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
3361
3362                 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
3363                                 "0327 Cannot abort els iocb %p "
3364                                 "with tag %x context %x, abort status %x, "
3365                                 "abort code %x\n",
3366                                 abort_iocb, abort_iotag, abort_context,
3367                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
3368
3369                 /*
3370                  * make sure we have the right iocbq before taking it
3371                  * off the txcmplq and trying to call the completion routine.
3372                  */
3373                 if (!abort_iocb ||
3374                     abort_iocb->iocb.ulpContext != abort_context ||
3375                     (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
3376                         spin_unlock_irq(&phba->hbalock);
3377                 else {
3378                         list_del_init(&abort_iocb->list);
3379                         pring->txcmplq_cnt--;
3380                         spin_unlock_irq(&phba->hbalock);
3381
3382                         abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3383                         abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3384                         abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
3385                         (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
3386                 }
3387         }
3388
3389         lpfc_sli_release_iocbq(phba, cmdiocb);
3390         return;
3391 }
3392
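/*
 * Completion handler used when an ELS/CT command is being thrown away
 * (e.g. during unload): just log the completion and free the iocb.
 */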
3393 static void
3394 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3395                      struct lpfc_iocbq *rspiocb)
3396 {
3397         IOCB_t *irsp = &rspiocb->iocb;
3398
3399         /* ELS cmd tag <ulpIoTag> completes */
3400         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3401                         "0133 Ignoring ELS cmd tag x%x completion Data: "
3402                         "x%x x%x x%x\n",
3403                         irsp->ulpIoTag, irsp->ulpStatus,
3404                         irsp->un.ulpWord[4], irsp->ulpTimeout);
3405         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
3406                 lpfc_ct_free_iocb(phba, cmdiocb);
3407         else
3408                 lpfc_els_free_iocb(phba, cmdiocb);
3409         return;
3410 }
3411
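/*
 * Issue an ABTS (or CLOSE_XRI if the link is not up) for the given
 * command iocb, based on its iotag.  The caller must check for
 * IOCB_ERROR and handle it; see the note at abort_iotag_exit.
 */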
3412 int
3413 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3414                            struct lpfc_iocbq *cmdiocb)
3415 {
3416         struct lpfc_vport *vport = cmdiocb->vport;
3417         struct lpfc_iocbq *abtsiocbp;
3418         IOCB_t *icmd = NULL;
3419         IOCB_t *iabt = NULL;
3420         int retval = IOCB_ERROR;
3421
3422         /*
3423          * There are certain command types we don't want to abort.  And we
3424          * don't want to abort commands that are already in the process of
3425          * being aborted.
3426          */
3427         icmd = &cmdiocb->iocb;
3428         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
3429             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3430             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
3431                 return 0;
3432
3433         /* If we're unloading, don't abort iocb on the ELS ring, but change the
3434          * callback so that nothing happens when it finishes.
3435          */
3436         if ((vport->load_flag & FC_UNLOADING) &&
3437             (pring->ringno == LPFC_ELS_RING)) {
3438                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3439                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3440                 else
3441                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
3442                 goto abort_iotag_exit;
3443         }
3444
3445         /* issue ABTS for this IOCB based on iotag */
3446         abtsiocbp = __lpfc_sli_get_iocbq(phba);
3447         if (abtsiocbp == NULL)
3448                 return 0;
3449
3450         /* This signals the response to set the correct status
3451          * before calling the completion handler.
3452          */
3453         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
3454
3455         iabt = &abtsiocbp->iocb;
3456         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
3457         iabt->un.acxri.abortContextTag = icmd->ulpContext;
3458         iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
3459         iabt->ulpLe = 1;
3460         iabt->ulpClass = icmd->ulpClass;
3461
3462         if (phba->link_state >= LPFC_LINK_UP)
3463                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
3464         else
3465                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
3466
3467         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
3468
3469         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3470                          "0339 Abort xri x%x, original iotag x%x, "
3471                          "abort cmd iotag x%x\n",
3472                          iabt->un.acxri.abortContextTag,
3473                          iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
3474         retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
3475
3476 abort_iotag_exit:
3477         /*
3478          * The caller of this routine should check for IOCB_ERROR and
3479          * handle it properly.  This routine no longer removes the iocb
3480          * from the txcmplq or calls the completion routine on IOCB_ERROR.
3481          */
3482         return retval;
3483 }
3484
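/*
 * Return 0 if the iocb is an active FCP command for this vport that
 * matches the given target/LUN/host context, non-zero otherwise.
 */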
3485 static int
3486 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3487                            uint16_t tgt_id, uint64_t lun_id,
3488                            lpfc_ctx_cmd ctx_cmd)
3489 {
3490         struct lpfc_scsi_buf *lpfc_cmd;
3491         struct scsi_cmnd *cmnd;
3492         int rc = 1;
3493
3494         if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
3495                 return rc;
3496
3497         if (iocbq->vport != vport)
3498                 return rc;
3499
3500         lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
3501         cmnd = lpfc_cmd->pCmd;
3502
3503         if (cmnd == NULL)
3504                 return rc;
3505
3506         switch (ctx_cmd) {
3507         case LPFC_CTX_LUN:
3508                 if ((cmnd->device->id == tgt_id) &&
3509                     (cmnd->device->lun == lun_id))
3510                         rc = 0;
3511                 break;
3512         case LPFC_CTX_TGT:
3513                 if (cmnd->device->id == tgt_id)
3514                         rc = 0;
3515                 break;
3516         case LPFC_CTX_HOST:
3517                 rc = 0;
3518                 break;
3519         default:
3520                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
3521                         __FUNCTION__, ctx_cmd);
3522                 break;
3523         }
3524
3525         return rc;
3526 }
3527
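/*
 * Count the outstanding FCP iocbs for this vport that match the given
 * target/LUN/host context.
 */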
3528 int
3529 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
3530                   lpfc_ctx_cmd ctx_cmd)
3531 {
3532         struct lpfc_hba *phba = vport->phba;
3533         struct lpfc_iocbq *iocbq;
3534         int sum, i;
3535
3536         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
3537                 iocbq = phba->sli.iocbq_lookup[i];
3538
3539                 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
3540                                                 ctx_cmd) == 0)
3541                         sum++;
3542         }
3543
3544         return sum;
3545 }
3546
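/*
 * Completion handler for FCP abort iocbs: just release the abort iocbq.
 */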
3547 void
3548 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3549                         struct lpfc_iocbq *rspiocb)
3550 {
3551         lpfc_sli_release_iocbq(phba, cmdiocb);
3552         return;
3553 }
3554
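/*
 * Issue an ABTS (or CLOSE_XRI if the link is down) for every outstanding
 * FCP iocb of this vport that matches the given context.  Returns the
 * number of iocbs that could not be aborted.
 */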
3555 int
3556 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
3557                     uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
3558 {
3559         struct lpfc_hba *phba = vport->phba;
3560         struct lpfc_iocbq *iocbq;
3561         struct lpfc_iocbq *abtsiocb;
3562         IOCB_t *cmd = NULL;
3563         int errcnt = 0, ret_val = 0;
3564         int i;
3565
3566         for (i = 1; i <= phba->sli.last_iotag; i++) {
3567                 iocbq = phba->sli.iocbq_lookup[i];
3568
3569                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
3570                                                abort_cmd) != 0)
3571                         continue;
3572
3573                 /* issue ABTS for this IOCB based on iotag */
3574                 abtsiocb = lpfc_sli_get_iocbq(phba);
3575                 if (abtsiocb == NULL) {
3576                         errcnt++;
3577                         continue;
3578                 }
3579
3580                 cmd = &iocbq->iocb;
3581                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
3582                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
3583                 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
3584                 abtsiocb->iocb.ulpLe = 1;
3585                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
3586                 abtsiocb->vport = phba->pport;
3587
3588                 if (lpfc_is_link_up(phba))
3589                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
3590                 else
3591                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
3592
3593                 /* Setup callback routine and issue the command. */
3594                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3595                 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
3596                 if (ret_val == IOCB_ERROR) {
3597                         lpfc_sli_release_iocbq(phba, abtsiocb);
3598                         errcnt++;
3599                         continue;
3600                 }
3601         }
3602
3603         return errcnt;
3604 }
3605
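/*
 * Completion handler for lpfc_sli_issue_iocb_wait: copy the response into
 * the caller's buffer, set LPFC_IO_WAKE and wake up the waiter.
 */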
3606 static void
3607 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3608                         struct lpfc_iocbq *cmdiocbq,
3609                         struct lpfc_iocbq *rspiocbq)
3610 {
3611         wait_queue_head_t *pdone_q;
3612         unsigned long iflags;
3613
3614         spin_lock_irqsave(&phba->hbalock, iflags);
3615         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3616         if (cmdiocbq->context2 && rspiocbq)
3617                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3618                        &rspiocbq->iocb, sizeof(IOCB_t));
3619
3620         pdone_q = cmdiocbq->context_un.wait_queue;
3621         if (pdone_q)
3622                 wake_up(pdone_q);
3623         spin_unlock_irqrestore(&phba->hbalock, iflags);
3624         return;
3625 }
3626
3627 /*
3628  * Issue the caller's iocb and wait for its completion, but no longer than the
3629  * caller's timeout.  Note that the LPFC_IO_WAKE flag is cleared before the
3630  * lpfc_sli_issue_iocb call since the wake routine sets it and by
3631  * definition this is a wait function.
3632  */
3633
3634 int
3635 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3636                          struct lpfc_sli_ring *pring,
3637                          struct lpfc_iocbq *piocb,
3638                          struct lpfc_iocbq *prspiocbq,
3639                          uint32_t timeout)
3640 {
3641         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3642         long timeleft, timeout_req = 0;
3643         int retval = IOCB_SUCCESS;
3644         uint32_t creg_val;
3645
3646         /*
3647          * If the caller has provided a response iocbq buffer, then context2
3648          * must be NULL or it is an error.
3649          */
3650         if (prspiocbq) {
3651                 if (piocb->context2)
3652                         return IOCB_ERROR;
3653                 piocb->context2 = prspiocbq;
3654         }
3655
3656         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3657         piocb->context_un.wait_queue = &done_q;
3658         piocb->iocb_flag &= ~LPFC_IO_WAKE;
3659
3660         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3661                 creg_val = readl(phba->HCregaddr);
3662                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3663                 writel(creg_val, phba->HCregaddr);
3664                 readl(phba->HCregaddr); /* flush */
3665         }
3666
3667         retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3668         if (retval == IOCB_SUCCESS) {
3669                 timeout_req = timeout * HZ;
3670                 timeleft = wait_event_timeout(done_q,
3671                                 piocb->iocb_flag & LPFC_IO_WAKE,
3672                                 timeout_req);
3673
3674                 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3675                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3676                                         "0331 IOCB wake signaled\n");
3677                 } else if (timeleft == 0) {
3678                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3679                                         "0338 IOCB wait timeout error - no "
3680                                         "wake response Data x%x\n", timeout);
3681                         retval = IOCB_TIMEDOUT;
3682                 } else {
3683                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3684                                         "0330 IOCB wake NOT set, "
3685                                         "Data x%x x%lx\n",
3686                                         timeout, (timeleft / jiffies));
3687                         retval = IOCB_TIMEDOUT;
3688                 }
3689         } else {
3690                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3691                                 ":0332 IOCB wait issue failed, Data x%x\n",
3692                                 retval);
3693                 retval = IOCB_ERROR;
3694         }
3695
3696         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3697                 creg_val = readl(phba->HCregaddr);
3698                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3699                 writel(creg_val, phba->HCregaddr);
3700                 readl(phba->HCregaddr); /* flush */
3701         }
3702
3703         if (prspiocbq)
3704                 piocb->context2 = NULL;
3705
3706         piocb->context_un.wait_queue = NULL;
3707         piocb->iocb_cmpl = NULL;
3708         return retval;
3709 }
3710
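/*
 * Issue a mailbox command and wait up to timeout seconds for its
 * completion.  Returns MBX_SUCCESS or MBX_TIMEOUT if the command was
 * accepted, or the issue failure code otherwise.
 */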
3711 int
3712 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3713                          uint32_t timeout)
3714 {
3715         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3716         int retval;
3717         unsigned long flag;
3718
3719         /* The caller must leave context1 empty. */
3720         if (pmboxq->context1)
3721                 return MBX_NOT_FINISHED;
3722
3723         /* setup wake call as IOCB callback */
3724         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3725         /* setup context field to pass wait_queue pointer to wake function  */
3726         pmboxq->context1 = &done_q;
3727
3728         /* now issue the command */
3729         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3730
3731         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3732                 wait_event_interruptible_timeout(done_q,
3733                                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3734                                 timeout * HZ);
3735
3736                 spin_lock_irqsave(&phba->hbalock, flag);
3737                 pmboxq->context1 = NULL;
3738                 /*
3739                  * if the LPFC_MBX_WAKE flag is set, the mailbox completed;
3740                  * otherwise do not free the resources.
3741                  */
3742                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
3743                         retval = MBX_SUCCESS;
3744                 else {
3745                         retval = MBX_TIMEOUT;
3746                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3747                 }
3748                 spin_unlock_irqrestore(&phba->hbalock, flag);
3749         }
3750
3751         return retval;
3752 }
3753
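/*
 * Wait for any active mailbox command to finish, handling mailbox
 * completion events as they arrive.  Returns 1 if a command is still
 * active after the timeout, 0 otherwise.
 */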
3754 int
3755 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3756 {
3757         struct lpfc_vport *vport = phba->pport;
3758         int i = 0;
3759         uint32_t ha_copy;
3760
3761         while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
3762                 if (i++ > LPFC_MBOX_TMO * 1000)
3763                         return 1;
3764
3765                 /*
3766                  * Call lpfc_sli_handle_mb_event only if a mailbox cmd
3767                  * did finish. This way we won't get the misleading
3768                  * "Stray Mailbox Interrupt" message.
3769                  */
3770                 spin_lock_irq(&phba->hbalock);
3771                 ha_copy = phba->work_ha;
3772                 phba->work_ha &= ~HA_MBATT;
3773                 spin_unlock_irq(&phba->hbalock);
3774
3775                 if (ha_copy & HA_MBATT)
3776                         if (lpfc_sli_handle_mb_event(phba) == 0)
3777                                 i = 0;
3778
3779                 msleep(1);
3780         }
3781
3782         return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3783 }
3784
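/*
 * HBA interrupt handler.  Reads and clears the host attention register,
 * handles mailbox completions, records link, error and slow-ring
 * attention for the worker thread, and processes FCP ring events
 * directly.
 */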
3785 irqreturn_t
3786 lpfc_intr_handler(int irq, void *dev_id)
3787 {
3788         struct lpfc_hba  *phba;
3789         uint32_t ha_copy;
3790         uint32_t work_ha_copy;
3791         unsigned long status;
3792         uint32_t control;
3793
3794         MAILBOX_t *mbox, *pmbox;
3795         struct lpfc_vport *vport;
3796         struct lpfc_nodelist *ndlp;
3797         struct lpfc_dmabuf *mp;
3798         LPFC_MBOXQ_t *pmb;
3799         int rc;
3800
3801         /*
3802          * Get the driver's phba structure from the dev_id and
3803          * assume the HBA is not interrupting.
3804          */
3805         phba = (struct lpfc_hba *) dev_id;
3806
3807         if (unlikely(!phba))
3808                 return IRQ_NONE;
3809
3810         /* If the pci channel is offline, ignore all the interrupts. */
3811         if (unlikely(pci_channel_offline(phba->pcidev)))
3812                 return IRQ_NONE;
3813
3814         phba->sli.slistat.sli_intr++;
3815
3816         /*
3817          * Call the HBA to see if it is interrupting.  If not, don't claim
3818          * the interrupt.
3819          */
3820
3821         /* Ignore all interrupts during initialization. */
3822         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
3823                 return IRQ_NONE;
3824
3825         /*
3826          * Read host attention register to determine interrupt source.
3827          * Clear Attention Sources, except Error Attention (to
3828          * preserve status) and Link Attention
3829          */
3830         spin_lock(&phba->hbalock);
3831         ha_copy = readl(phba->HAregaddr);
3832         /* If somebody is waiting to handle an eratt, don't process it
3833          * here.  The brdkill function will do this.
3834          */
3835         if (phba->link_flag & LS_IGNORE_ERATT)
3836                 ha_copy &= ~HA_ERATT;
3837         writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3838         readl(phba->HAregaddr); /* flush */
3839         spin_unlock(&phba->hbalock);
3840
3841         if (unlikely(!ha_copy))
3842                 return IRQ_NONE;
3843
3844         work_ha_copy = ha_copy & phba->work_ha_mask;
3845
3846         if (unlikely(work_ha_copy)) {
3847                 if (work_ha_copy & HA_LATT) {
3848                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
3849                                 /*
3850                                  * Turn off Link Attention interrupts
3851                                  * until CLEAR_LA done
3852                                  */
3853                                 spin_lock(&phba->hbalock);
3854                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3855                                 control = readl(phba->HCregaddr);
3856                                 control &= ~HC_LAINT_ENA;
3857                                 writel(control, phba->HCregaddr);
3858                                 readl(phba->HCregaddr); /* flush */
3859                                 spin_unlock(&phba->hbalock);
3860                         }
3861                         else
3862                                 work_ha_copy &= ~HA_LATT;
3863                 }
3864
3865                 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
3866                         /*
3867                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
3868                          * the only slow ring.
3869                          */
3870                         status = (work_ha_copy &
3871                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
3872                         status >>= (4*LPFC_ELS_RING);
3873                         if (status & HA_RXMASK) {
3874                                 spin_lock(&phba->hbalock);
3875                                 control = readl(phba->HCregaddr);
3876
3877                                 lpfc_debugfs_slow_ring_trc(phba,
3878                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
3879                                 control, status,
3880                                 (uint32_t)phba->sli.slistat.sli_intr);
3881
3882                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
3883                                         lpfc_debugfs_slow_ring_trc(phba,
3884                                                 "ISR Disable ring:"
3885                                                 "pwork:x%x hawork:x%x wait:x%x",
3886                                                 phba->work_ha, work_ha_copy,
3887                                                 (uint32_t)((unsigned long)
3888                                                 phba->work_wait));
3889
3890                                         control &=
3891                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
3892                                         writel(control, phba->HCregaddr);
3893                                         readl(phba->HCregaddr); /* flush */
3894                                 }
3895                                 else {
3896                                         lpfc_debugfs_slow_ring_trc(phba,
3897                                                 "ISR slow ring:   pwork:"
3898                                                 "x%x hawork:x%x wait:x%x",
3899                                                 phba->work_ha, work_ha_copy,
3900                                                 (uint32_t)((unsigned long)
3901                                                 phba->work_wait));
3902                                 }
3903                                 spin_unlock(&phba->hbalock);
3904                         }
3905                 }
3906
3907                 if (work_ha_copy & HA_ERATT) {
3908                         phba->link_state = LPFC_HBA_ERROR;
3909                         /*
3910                          * There was a link/board error.  Read the
3911                          * status register to retrieve the error event
3912                          * and process it.
3913                          */
3914                         phba->sli.slistat.err_attn_event++;
3915                         /* Save status info */
3916                         phba->work_hs = readl(phba->HSregaddr);
3917                         phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
3918                         phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
3919
3920                         /* Clear Chip error bit */
3921                         writel(HA_ERATT, phba->HAregaddr);
3922                         readl(phba->HAregaddr); /* flush */
3923                         phba->pport->stopped = 1;
3924                 }
3925
3926                 if ((work_ha_copy & HA_MBATT) &&
3927                     (phba->sli.mbox_active)) {
3928                         pmb = phba->sli.mbox_active;
3929                         pmbox = &pmb->mb;
3930                         mbox = &phba->slim2p->mbx;
3931                         vport = pmb->vport;
3932
3933                         /* First check out the status word */
3934                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
3935                         if (pmbox->mbxOwner != OWN_HOST) {
3936                                 /*
3937                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
3938                                  * mbxStatus <status>
3939                                  */
3940                                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
3941                                                 LOG_SLI,
3942                                                 "(%d):0304 Stray Mailbox "
3943                                                 "Interrupt mbxCommand x%x "
3944                                                 "mbxStatus x%x\n",
3945                                                 (vport ? vport->vpi : 0),
3946                                                 pmbox->mbxCommand,
3947                                                 pmbox->mbxStatus);
3948                         }
3949                         phba->last_completion_time = jiffies;
3950                         del_timer_sync(&phba->sli.mbox_tmo);
3951
3952                         phba->sli.mbox_active = NULL;
3953                         if (pmb->mbox_cmpl) {
3954                                 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3955                                                       MAILBOX_CMD_SIZE);
3956                         }
3957                         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
3958                                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
3959
3960                                 lpfc_debugfs_disc_trc(vport,
3961                                         LPFC_DISC_TRC_MBOX_VPORT,
3962                                         "MBOX dflt rpi: : status:x%x rpi:x%x",
3963                                         (uint32_t)pmbox->mbxStatus,
3964                                         pmbox->un.varWords[0], 0);
3965
3966                                 if (!pmbox->mbxStatus) {
3967                                         mp = (struct lpfc_dmabuf *)
3968                                                 (pmb->context1);
3969                                         ndlp = (struct lpfc_nodelist *)
3970                                                 pmb->context2;
3971
3972                                         /* Reg_LOGIN of dflt RPI was successful.
3973                                          * Now let's get rid of the RPI using the
3974                                          * same mbox buffer.
3975                                          */
3976                                         lpfc_unreg_login(phba, vport->vpi,
3977                                                 pmbox->un.varWords[0], pmb);
3978                                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3979                                         pmb->context1 = mp;
3980                                         pmb->context2 = ndlp;
3981                                         pmb->vport = vport;
3982                                         spin_lock(&phba->hbalock);
3983                                         phba->sli.sli_flag &=
3984                                                 ~LPFC_SLI_MBOX_ACTIVE;
3985                                         spin_unlock(&phba->hbalock);
3986                                         goto send_current_mbox;
3987                                 }
3988                         }
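                             /* Normal completion: clear the mailbox timeout
                              * event for the worker and queue the completed
                              * mailbox for its handler.
                              */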
3989                         spin_lock(&phba->pport->work_port_lock);
3990                         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3991                         spin_unlock(&phba->pport->work_port_lock);
3992                         lpfc_mbox_cmpl_put(phba, pmb);
3993                 }
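                     /* No mailbox command is outstanding: issue the next
                      * queued mailbox command, if there is one.
                      */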
3994                 if ((work_ha_copy & HA_MBATT) &&
3995                     (phba->sli.mbox_active == NULL)) {
3996 send_next_mbox:
3997                         spin_lock(&phba->hbalock);
3998                         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3999                         pmb = lpfc_mbox_get(phba);
4000                         spin_unlock(&phba->hbalock);
4001 send_current_mbox:
4002                         /* Process next mailbox command if there is one */
4003                         if (pmb != NULL) {
4004                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4005                                 if (rc == MBX_NOT_FINISHED) {
4006                                         pmb->mb.mbxStatus = MBX_NOT_FINISHED;
4007                                         lpfc_mbox_cmpl_put(phba, pmb);
4008                                         goto send_next_mbox;
4009                                 }
4010                         }
4011
4012                 }
4013
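                     /* Hand any remaining (slow path) events to the worker
                      * thread by recording them in work_ha and waking it.
                      */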
4014                 spin_lock(&phba->hbalock);
4015                 phba->work_ha |= work_ha_copy;
4016                 if (phba->work_wait)
4017                         lpfc_worker_wake_up(phba);
4018                 spin_unlock(&phba->hbalock);
4019         }
4020
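             /* Strip the events handed to the worker thread; only the
              * fast path ring attention bits remain below.
              */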
4021         ha_copy &= ~(phba->work_ha_mask);
4022
4023         /*
4024          * Process all events on FCP ring.  Take the optimized path for
4025          * FCP IO.  Any other IO is slow path and is handled by
4026          * the worker thread.
4027          */
4028         status = (ha_copy & (HA_RXMASK  << (4*LPFC_FCP_RING)));
4029         status >>= (4*LPFC_FCP_RING);
4030         if (status & HA_RXMASK)
4031                 lpfc_sli_handle_fast_ring_event(phba,
4032                                                 &phba->sli.ring[LPFC_FCP_RING],
4033                                                 status);
4034
4035         if (phba->cfg_multi_ring_support == 2) {
4036                 /*
4037                  * Process all events on extra ring.  Take the optimized path
4038                  * for extra ring IO.  Any other IO is slow path and is handled
4039                  * by the worker thread.
4040                  */
4041                 status = (ha_copy & (HA_RXMASK  << (4*LPFC_EXTRA_RING)));
4042                 status >>= (4*LPFC_EXTRA_RING);
4043                 if (status & HA_RXMASK) {
4044                         lpfc_sli_handle_fast_ring_event(phba,
4045                                         &phba->sli.ring[LPFC_EXTRA_RING],
4046                                         status);
4047                 }
4048         }
4049         return IRQ_HANDLED;
4050
4051 } /* lpfc_intr_handler */