[SCSI] lpfc 8.3.6 : FCoE Protocol Fixes
drivers/scsi/lpfc/lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);

/**
 * lpfc_get_iocb_from_iocbq - Get the IOCB embedded in an iocb object
 * @iocbq: Pointer to driver iocb object.
 *
 * This function returns a pointer to the IOCB carried by @iocbq.
 **/
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
        union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
                bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);

        lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

        /* Update the host index before invoking device */
        host_index = q->host_index;
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
        bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
        readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

        return 0;
}
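
/*
 * Illustrative usage sketch (not part of the driver): posting a WQE while
 * holding the hbalock, as the kernel-doc above requires. The choice of the
 * ELS work queue and the empty WQE are assumptions made purely for
 * illustration.
 *
 *      union lpfc_wqe wqe;
 *      unsigned long iflags;
 *      int rc;
 *
 *      memset(&wqe, 0, sizeof(wqe));
 *      // ... build the WQE fields with bf_set() ...
 *      spin_lock_irqsave(&phba->hbalock, iflags);
 *      rc = lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe);
 *      spin_unlock_irqrestore(&phba->hbalock, iflags);
 *      if (rc == -ENOMEM)
 *              // the queue is full; retry after the HBA consumes entries
 */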

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        uint32_t released = 0;

        if (q->hba_index == index)
                return 0;
        do {
                q->hba_index = ((q->hba_index + 1) % q->entry_count);
                released++;
        } while (q->hba_index != index);
        return released;
}
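
/*
 * Worked example (illustrative): with entry_count = 256, hba_index = 250,
 * and a completion reporting index = 2, the do/while loop above walks
 * 251..255 and then wraps through 0..2, so the routine returns 8 consumed
 * entries. The modulo arithmetic is what makes the queue circular.
 */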

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
        struct lpfc_register doorbell;
        uint32_t host_index;

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        host_index = q->host_index;
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;

        /* If the next EQE is not valid then we are done */
        if (!bf_get(lpfc_eqe_valid, eqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        if (((q->hba_index + 1) % q->entry_count) == q->host_index)
                return NULL;

        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return eqe;
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_eqe *temp_eqe;
        struct lpfc_register doorbell;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_eqe = q->qe[q->host_index].eqe;
                bf_set(lpfc_eqe_valid, temp_eqe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}
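
/*
 * Illustrative sketch of the get/release pairing for an EQ (the driver's
 * interrupt paths follow this pattern; the "eq" variable is assumed here
 * purely for illustration):
 *
 *      struct lpfc_eqe *eqe;
 *
 *      while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
 *              ; // ... dispatch on the EQE contents ...
 *      // pop the processed entries back to the HBA and rearm the EQ
 *      lpfc_sli4_eq_release(eq, true);
 */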

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* If the next CQE is not valid then we are done */
        if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        if (((q->hba_index + 1) % q->entry_count) == q->host_index)
                return NULL;

        cqe = q->qe[q->hba_index].cqe;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_cqe *temp_qe;
        struct lpfc_register doorbell;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_qe = q->qe[q->host_index].cqe;
                bf_set(lpfc_cqe_valid, temp_qe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}
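
/*
 * CQ processing mirrors the EQ sketch above: drain entries with
 * lpfc_sli4_cq_get() in a loop, then call lpfc_sli4_cq_release(cq, true)
 * to hand the processed entries back to the HBA and rearm the queue.
 */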

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Receive Queue Entry to put on the header queue.
 * @drqe: The Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If the queues are of the wrong type or their host indices are
 * out of sync it returns -EINVAL; if no entries are available it returns
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
        struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
        struct lpfc_register doorbell;
        int put_index = hq->host_index;

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq->host_index != dq->host_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq->host_index + 1) % hq->entry_count);
        dq->host_index = ((dq->host_index + 1) % dq->entry_count);

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
                doorbell.word0 = 0;
                bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
                       LPFC_RQ_POST_BATCH);
                bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
                writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
        }
        return put_index;
}
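
/*
 * For an in-file usage example see lpfc_sli_hbq_to_firmware_s4() below,
 * which builds a header/data RQE pair from an hbq_dmabuf and posts it with
 * lpfc_sli4_rq_put(). A minimal sketch (illustrative only; "buf" is an
 * assumed hbq_dmabuf pointer):
 *
 *      struct lpfc_rqe hrqe, drqe;
 *      int rc;
 *
 *      hrqe.address_lo = putPaddrLow(buf->hbuf.phys);
 *      hrqe.address_hi = putPaddrHigh(buf->hbuf.phys);
 *      drqe.address_lo = putPaddrLow(buf->dbuf.phys);
 *      drqe.address_hi = putPaddrHigh(buf->dbuf.phys);
 *      rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
 *                            &hrqe, &drqe);
 */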

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->cmdringaddr) +
                           pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->rspringaddr) +
                           pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        uint16_t adj_xri;
        struct lpfc_sglq *sglq;

        adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
        if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
                return NULL;
        sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
        phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        uint16_t adj_xri;
        struct lpfc_sglq *sglq;

        adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
        if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
                return NULL;
        sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
        return sglq;
}

/**
 * __lpfc_sli_get_sglq - Allocates an sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. It gets a new driver
 * sglq object from the sglq list. If the list is not empty, it
 * returns a pointer to the newly allocated sglq object, else it
 * returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        uint16_t adj_xri;

        list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
        phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}
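
/*
 * Illustrative allocation/release pairing (a sketch, not driver code):
 * a caller typically takes an iocb from the pool, assigns an iotag with
 * lpfc_sli_next_iotag() (defined below), and returns the iocb to the
 * pool if setup fails.
 *
 *      struct lpfc_iocbq *piocb = lpfc_sli_get_iocbq(phba);
 *
 *      if (!piocb)
 *              return NULL;
 *      if (lpfc_sli_next_iotag(phba, piocb) == 0) {
 *              lpfc_sli_release_iocbq(phba, piocb);
 *              return NULL;
 *      }
 *      // ... fill in piocb->iocb and issue it ...
 */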

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release a driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag;

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
        if (sglq) {
                if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
                        && ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
                        && (iocbq->iocb.un.ulpWord[4]
                                == IOERR_ABORT_REQUESTED))) {
                        spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
                                        iflag);
                        list_add(&sglq->list,
                                &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.abts_sgl_list_lock, iflag);
                } else
                        list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
        }

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release a driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release a driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        phba->__lpfc_sli_release_iocbq(phba, iocbq);
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels each IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

                if (!piocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, piocb);
                else {
                        piocb->iocb.ulpStatus = ulpstatus;
                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                        (piocb->iocb_cmpl) (phba, piocb, piocb);
                }
        }
        return;
}
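
/*
 * Illustrative sketch: flushing a ring's txq through
 * lpfc_sli_cancel_iocbs(). The abort status values shown are typical
 * choices, used here only as an example:
 *
 *      LIST_HEAD(completions);
 *
 *      spin_lock_irq(&phba->hbalock);
 *      list_splice_init(&pring->txq, &completions);
 *      pring->txq_cnt = 0;
 *      spin_unlock_irq(&phba->hbalock);
 *
 *      lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *                            IOERR_SLI_ABORTED);
 */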

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return LPFC_UNKNOWN_IOCB;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
        case DSSCMD_IWRITE64_CR:
        case DSSCMD_IWRITE64_CX:
        case DSSCMD_IREAD64_CR:
        case DSSCMD_IREAD64_CX:
        case DSSCMD_INVALIDATE_DEK:
        case DSSCMD_SET_KEK:
        case DSSCMD_GET_KEK_ID:
        case DSSCMD_GEN_XFER:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
        case CMD_XMIT_BLS_RSP64_CX:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
                                __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
        list_add_tail(&piocb->list, &pring->txcmplq);
        pring->txcmplq_cnt++;
        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
           (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
           (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
                if (!piocb->vport)
                        BUG();
                else
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies + HZ * (phba->fc_ratov << 1));
        }

        return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *cmd_iocb;

        list_remove_head(&pring->txq, cmd_iocb, struct lpfc_iocbq, list);
        if (cmd_iocb != NULL)
                pring->txq_cnt--;
        return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
        uint32_t max_cmd_idx = pring->numCiocb;

        if ((pring->next_cmdidx == pring->cmdidx) &&
           (++pring->next_cmdidx >= max_cmd_idx))
                pring->next_cmdidx = 0;

        if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

                if (unlikely(pring->local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        pring->ringno,
                                        pring->local_getidx, max_cmd_idx);

                        phba->link_state = LPFC_HBA_ERROR;
                        /*
                         * All error attention handlers are posted to
                         * worker thread
                         */
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;

                        lpfc_worker_wake_up(phba);

                        return NULL;
                }

                if (pring->local_getidx == pring->next_cmdidx)
                        return NULL;
        }

        return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_iocbq **new_arr;
        struct lpfc_iocbq **old_arr;
        size_t new_len;
        struct lpfc_sli *psli = &phba->sli;
        uint16_t iotag;

        spin_lock_irq(&phba->hbalock);
        iotag = psli->last_iotag;
        if (++iotag < psli->iocbq_lookup_len) {
                psli->last_iotag = iotag;
                psli->iocbq_lookup[iotag] = iocbq;
                spin_unlock_irq(&phba->hbalock);
                iocbq->iotag = iotag;
                return iotag;
        } else if (psli->iocbq_lookup_len < (0xffff
                                           - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
                spin_unlock_irq(&phba->hbalock);
                new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
                                  GFP_KERNEL);
                if (new_arr) {
                        spin_lock_irq(&phba->hbalock);
                        old_arr = psli->iocbq_lookup;
                        if (new_len <= psli->iocbq_lookup_len) {
                                /* highly improbable case */
                                kfree(new_arr);
                                iotag = psli->last_iotag;
                                if (++iotag < psli->iocbq_lookup_len) {
                                        psli->last_iotag = iotag;
                                        psli->iocbq_lookup[iotag] = iocbq;
                                        spin_unlock_irq(&phba->hbalock);
                                        iocbq->iotag = iotag;
                                        return iotag;
                                }
                                spin_unlock_irq(&phba->hbalock);
                                return 0;
                        }
                        if (psli->iocbq_lookup)
                                memcpy(new_arr, old_arr,
                                       ((psli->last_iotag + 1) *
                                        sizeof(struct lpfc_iocbq *)));
                        psli->iocbq_lookup = new_arr;
                        psli->iocbq_lookup_len = new_len;
                        psli->last_iotag = iotag;
                        psli->iocbq_lookup[iotag] = iocbq;
                        spin_unlock_irq(&phba->hbalock);
                        iocbq->iotag = iotag;
                        kfree(old_arr);
                        return iotag;
                }
        } else
                spin_unlock_irq(&phba->hbalock);

        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
                        psli->last_iotag);

        return 0;
}
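
/*
 * Note (illustrative): once assigned, the iotag indexes straight back into
 * the lookup table this routine grows, so a completion handler can recover
 * its command iocb with:
 *
 *      struct lpfc_iocbq *cmdiocb = phba->sli.iocbq_lookup[iotag];
 *
 * bounds-checked against psli->last_iotag in the real lookup paths.
 */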
1038
1039 /**
1040  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1041  * @phba: Pointer to HBA context object.
1042  * @pring: Pointer to driver SLI ring object.
1043  * @iocb: Pointer to iocb slot in the ring.
1044  * @nextiocb: Pointer to driver iocb object which need to be
1045  *            posted to firmware.
1046  *
1047  * This function is called with hbalock held to post a new iocb to
1048  * the firmware. This function copies the new iocb to ring iocb slot and
1049  * updates the ring pointers. It adds the new iocb to txcmplq if there is
1050  * a completion call back for this iocb else the function will free the
1051  * iocb object.
1052  **/
1053 static void
1054 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1055                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1056 {
1057         /*
1058          * Set up an iotag
1059          */
1060         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1061
1062
1063         if (pring->ringno == LPFC_ELS_RING) {
1064                 lpfc_debugfs_slow_ring_trc(phba,
1065                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1066                         *(((uint32_t *) &nextiocb->iocb) + 4),
1067                         *(((uint32_t *) &nextiocb->iocb) + 6),
1068                         *(((uint32_t *) &nextiocb->iocb) + 7));
1069         }
1070
1071         /*
1072          * Issue iocb command to adapter
1073          */
1074         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1075         wmb();
1076         pring->stats.iocb_cmd++;
1077
1078         /*
1079          * If there is no completion routine to call, we can release the
1080          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1081          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1082          */
1083         if (nextiocb->iocb_cmpl)
1084                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1085         else
1086                 __lpfc_sli_release_iocbq(phba, nextiocb);
1087
1088         /*
1089          * Let the HBA know what IOCB slot will be the next one the
1090          * driver will put a command into.
1091          */
1092         pring->cmdidx = pring->next_cmdidx;
1093         writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1094 }
1095
1096 /**
1097  * lpfc_sli_update_full_ring - Update the chip attention register
1098  * @phba: Pointer to HBA context object.
1099  * @pring: Pointer to driver SLI ring object.
1100  *
1101  * The caller is not required to hold any lock for calling this function.
1102  * This function updates the chip attention bits for the ring to inform firmware
1103  * that there are pending work to be done for this ring and requests an
1104  * interrupt when there is space available in the ring. This function is
1105  * called when the driver is unable to post more iocbs to the ring due
1106  * to unavailability of space in the ring.
1107  **/
1108 static void
1109 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1110 {
1111         int ringno = pring->ringno;
1112
1113         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1114
1115         wmb();
1116
1117         /*
1118          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1119          * The HBA will tell us when an IOCB entry is available.
1120          */
1121         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1122         readl(phba->CAregaddr); /* flush */
1123
1124         pring->stats.iocb_cmd_full++;
1125 }
1126
1127 /**
1128  * lpfc_sli_update_ring - Update chip attention register
1129  * @phba: Pointer to HBA context object.
1130  * @pring: Pointer to driver SLI ring object.
1131  *
1132  * This function updates the chip attention register bit for the
1133  * given ring to inform HBA that there is more work to be done
1134  * in this ring. The caller is not required to hold any lock.
1135  **/
1136 static void
1137 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1138 {
1139         int ringno = pring->ringno;
1140
1141         /*
1142          * Tell the HBA that there is work to do in this ring.
1143          */
1144         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1145                 wmb();
1146                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1147                 readl(phba->CAregaddr); /* flush */
1148         }
1149 }
1150
1151 /**
1152  * lpfc_sli_resume_iocb - Process iocbs in the txq
1153  * @phba: Pointer to HBA context object.
1154  * @pring: Pointer to driver SLI ring object.
1155  *
1156  * This function is called with hbalock held to post pending iocbs
1157  * in the txq to the firmware. This function is called when driver
1158  * detects space available in the ring.
1159  **/
1160 static void
1161 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1162 {
1163         IOCB_t *iocb;
1164         struct lpfc_iocbq *nextiocb;
1165
1166         /*
1167          * Check to see if:
1168          *  (a) there is anything on the txq to send
1169          *  (b) link is up
1170          *  (c) link attention events can be processed (fcp ring only)
1171          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1172          */
1173         if (pring->txq_cnt &&
1174             lpfc_is_link_up(phba) &&
1175             (pring->ringno != phba->sli.fcp_ring ||
1176              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1177
1178                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1179                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1180                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1181
1182                 if (iocb)
1183                         lpfc_sli_update_ring(phba, pring);
1184                 else
1185                         lpfc_sli_update_full_ring(phba, pring);
1186         }
1187
1188         return;
1189 }
1190
1191 /**
1192  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1193  * @phba: Pointer to HBA context object.
1194  * @hbqno: HBQ number.
1195  *
1196  * This function is called with hbalock held to get the next
1197  * available slot for the given HBQ. If there is free slot
1198  * available for the HBQ it will return pointer to the next available
1199  * HBQ entry else it will return NULL.
1200  **/
1201 static struct lpfc_hbq_entry *
1202 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1203 {
1204         struct hbq_s *hbqp = &phba->hbqs[hbqno];
1205
1206         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1207             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1208                 hbqp->next_hbqPutIdx = 0;
1209
1210         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1211                 uint32_t raw_index = phba->hbq_get[hbqno];
1212                 uint32_t getidx = le32_to_cpu(raw_index);
1213
1214                 hbqp->local_hbqGetIdx = getidx;
1215
1216                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1217                         lpfc_printf_log(phba, KERN_ERR,
1218                                         LOG_SLI | LOG_VPORT,
1219                                         "1802 HBQ %d: local_hbqGetIdx "
1220                                         "%u is > than hbqp->entry_count %u\n",
1221                                         hbqno, hbqp->local_hbqGetIdx,
1222                                         hbqp->entry_count);
1223
1224                         phba->link_state = LPFC_HBA_ERROR;
1225                         return NULL;
1226                 }
1227
1228                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1229                         return NULL;
1230         }
1231
1232         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1233                         hbqp->hbqPutIdx;
1234 }
1235
1236 /**
1237  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1238  * @phba: Pointer to HBA context object.
1239  *
1240  * This function is called with no lock held to free all the
1241  * hbq buffers while uninitializing the SLI interface. It also
1242  * frees the HBQ buffers returned by the firmware but not yet
1243  * processed by the upper layers.
1244  **/
1245 void
1246 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1247 {
1248         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1249         struct hbq_dmabuf *hbq_buf;
1250         unsigned long flags;
1251         int i, hbq_count;
1252         uint32_t hbqno;
1253
1254         hbq_count = lpfc_sli_hbq_count();
1255         /* Return all memory used by all HBQs */
1256         spin_lock_irqsave(&phba->hbalock, flags);
1257         for (i = 0; i < hbq_count; ++i) {
1258                 list_for_each_entry_safe(dmabuf, next_dmabuf,
1259                                 &phba->hbqs[i].hbq_buffer_list, list) {
1260                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1261                         list_del(&hbq_buf->dbuf.list);
1262                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1263                 }
1264                 phba->hbqs[i].buffer_count = 0;
1265         }
1266         /* Return all HBQ buffer that are in-fly */
1267         list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1268                                  list) {
1269                 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1270                 list_del(&hbq_buf->dbuf.list);
1271                 if (hbq_buf->tag == -1) {
1272                         (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1273                                 (phba, hbq_buf);
1274                 } else {
1275                         hbqno = hbq_buf->tag >> 16;
1276                         if (hbqno >= LPFC_MAX_HBQS)
1277                                 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1278                                         (phba, hbq_buf);
1279                         else
1280                                 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1281                                         hbq_buf);
1282                 }
1283         }
1284
1285         /* Mark the HBQs not in use */
1286         phba->hbq_in_use = 0;
1287         spin_unlock_irqrestore(&phba->hbalock, flags);
1288 }
1289
1290 /**
1291  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1292  * @phba: Pointer to HBA context object.
1293  * @hbqno: HBQ number.
1294  * @hbq_buf: Pointer to HBQ buffer.
1295  *
1296  * This function is called with the hbalock held to post a
1297  * hbq buffer to the firmware. If the function finds an empty
1298  * slot in the HBQ, it will post the buffer. The function will return
1299  * pointer to the hbq entry if it successfully post the buffer
1300  * else it will return NULL.
1301  **/
1302 static int
1303 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1304                          struct hbq_dmabuf *hbq_buf)
1305 {
1306         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1307 }
1308
1309 /**
1310  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1311  * @phba: Pointer to HBA context object.
1312  * @hbqno: HBQ number.
1313  * @hbq_buf: Pointer to HBQ buffer.
1314  *
1315  * This function is called with the hbalock held to post a hbq buffer to the
1316  * firmware. If the function finds an empty slot in the HBQ, it will post the
1317  * buffer and place it on the hbq_buffer_list. The function will return zero if
1318  * it successfully post the buffer else it will return an error.
1319  **/
1320 static int
1321 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1322                             struct hbq_dmabuf *hbq_buf)
1323 {
1324         struct lpfc_hbq_entry *hbqe;
1325         dma_addr_t physaddr = hbq_buf->dbuf.phys;
1326
1327         /* Get next HBQ entry slot to use */
1328         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1329         if (hbqe) {
1330                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1331
1332                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1333                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
1334                 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
1335                 hbqe->bde.tus.f.bdeFlags = 0;
1336                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1337                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1338                                 /* Sync SLIM */
1339                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1340                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1341                                 /* flush */
1342                 readl(phba->hbq_put + hbqno);
1343                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1344                 return 0;
1345         } else
1346                 return -ENOMEM;
1347 }
1348
1349 /**
1350  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1351  * @phba: Pointer to HBA context object.
1352  * @hbqno: HBQ number.
1353  * @hbq_buf: Pointer to HBQ buffer.
1354  *
1355  * This function is called with the hbalock held to post an RQE to the SLI4
1356  * firmware. If able to post the RQE to the RQ, it will queue the hbq entry to
1357  * the hbq_buffer_list and return zero, otherwise it will return an error.
1358  **/
1359 static int
1360 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1361                             struct hbq_dmabuf *hbq_buf)
1362 {
1363         int rc;
1364         struct lpfc_rqe hrqe;
1365         struct lpfc_rqe drqe;
1366
1367         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1368         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1369         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1370         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1371         rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1372                               &hrqe, &drqe);
1373         if (rc < 0)
1374                 return rc;
1375         hbq_buf->tag = rc;
1376         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1377         return 0;
1378 }
1379
1380 /* HBQ for ELS and CT traffic. */
1381 static struct lpfc_hbq_init lpfc_els_hbq = {
1382         .rn = 1,
1383         .entry_count = 200,
1384         .mask_count = 0,
1385         .profile = 0,
1386         .ring_mask = (1 << LPFC_ELS_RING),
1387         .buffer_count = 0,
1388         .init_count = 40,
1389         .add_count = 40,
1390 };
1391
1392 /* HBQ for the extra ring if needed */
1393 static struct lpfc_hbq_init lpfc_extra_hbq = {
1394         .rn = 1,
1395         .entry_count = 200,
1396         .mask_count = 0,
1397         .profile = 0,
1398         .ring_mask = (1 << LPFC_EXTRA_RING),
1399         .buffer_count = 0,
1400         .init_count = 0,
1401         .add_count = 5,
1402 };
1403
1404 /* Array of HBQs */
1405 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1406         &lpfc_els_hbq,
1407         &lpfc_extra_hbq,
1408 };
1409
1410 /**
1411  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1412  * @phba: Pointer to HBA context object.
1413  * @hbqno: HBQ number.
1414  * @count: Number of HBQ buffers to be posted.
1415  *
1416  * This function is called with no lock held to post more hbq buffers to the
1417  * given HBQ. The function returns the number of HBQ buffers successfully
1418  * posted.
1419  **/
1420 static int
1421 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1422 {
1423         uint32_t i, posted = 0;
1424         unsigned long flags;
1425         struct hbq_dmabuf *hbq_buffer;
1426         LIST_HEAD(hbq_buf_list);
1427         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1428                 return 0;
1429
1430         if ((phba->hbqs[hbqno].buffer_count + count) >
1431             lpfc_hbq_defs[hbqno]->entry_count)
1432                 count = lpfc_hbq_defs[hbqno]->entry_count -
1433                                         phba->hbqs[hbqno].buffer_count;
1434         if (!count)
1435                 return 0;
1436         /* Allocate HBQ entries */
1437         for (i = 0; i < count; i++) {
1438                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1439                 if (!hbq_buffer)
1440                         break;
1441                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1442         }
1443         /* Check whether HBQ is still in use */
1444         spin_lock_irqsave(&phba->hbalock, flags);
1445         if (!phba->hbq_in_use)
1446                 goto err;
1447         while (!list_empty(&hbq_buf_list)) {
1448                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1449                                  dbuf.list);
1450                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1451                                       (hbqno << 16));
1452                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1453                         phba->hbqs[hbqno].buffer_count++;
1454                         posted++;
1455                 } else
1456                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1457         }
1458         spin_unlock_irqrestore(&phba->hbalock, flags);
1459         return posted;
1460 err:
1461         spin_unlock_irqrestore(&phba->hbalock, flags);
1462         while (!list_empty(&hbq_buf_list)) {
1463                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1464                                  dbuf.list);
1465                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1466         }
1467         return 0;
1468 }
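
/*
 * Editor's sketch: the tag assembled in the fill routine above packs the
 * HBQ number into the upper 16 bits and the buffer index into the lower
 * 16 bits, which is why lookup code elsewhere in this file recovers the
 * queue number with "tag >> 16".  Hypothetical helpers for illustration:
 */
static inline uint32_t example_hbq_tag(uint32_t hbqno, uint32_t index)
{
	return (hbqno << 16) | (index & 0xffff);
}

static inline uint32_t example_hbq_tag_to_hbqno(uint32_t tag)
{
	return tag >> 16;	/* only valid when < LPFC_MAX_HBQS */
}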
1469
1470 /**
1471  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1472  * @phba: Pointer to HBA context object.
1473  * @qno: HBQ number.
1474  *
1475  * This function posts more buffers to the HBQ. This function
1476  * is called with no lock held. The function returns the number of HBQ entries
1477  * successfully allocated.
1478  **/
1479 int
1480 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1481 {
1482         return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1483                                          lpfc_hbq_defs[qno]->add_count));
1484 }
1485
1486 /**
1487  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1488  * @phba: Pointer to HBA context object.
1489  * @qno:  HBQ queue number.
1490  *
1491  * This function is called from SLI initialization code path with
1492  * no lock held to post initial HBQ buffers to firmware. The
1493  * function returns the number of HBQ entries successfully allocated.
1494  **/
1495 static int
1496 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1497 {
1498         return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1499                                          lpfc_hbq_defs[qno]->init_count));
1500 }
1501
1502 /**
1503  * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
1504  * @rb_list: Pointer to the hbq buffer list to remove a buffer from.
1505  *
1506  * This function removes the first hbq buffer on an hbq list and returns a
1507  * pointer to that buffer. If it finds no buffers on the list it returns
1508  * NULL.
1509  **/
1510 static struct hbq_dmabuf *
1511 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1512 {
1513         struct lpfc_dmabuf *d_buf;
1514
1515         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1516         if (!d_buf)
1517                 return NULL;
1518         return container_of(d_buf, struct hbq_dmabuf, dbuf);
1519 }
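
/*
 * Editor's note: the hbq lists link the embedded lpfc_dmabuf member, while
 * callers work with the enclosing hbq_dmabuf; container_of() recovers the
 * outer structure from the member, as the function above does.  A generic
 * illustration of the pattern:
 */
static inline struct hbq_dmabuf *
example_dbuf_to_hbq_buf(struct lpfc_dmabuf *d_buf)
{
	/* pointer arithmetic back from the member to the outer struct */
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}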
1520
1521 /**
1522  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1523  * @phba: Pointer to HBA context object.
1524  * @tag: Tag of the hbq buffer.
1525  *
1526  * This function is called with hbalock held. This function searches
1527  * for the hbq buffer associated with the given tag in the hbq buffer
1528  * list. If it finds the hbq buffer, it returns the hbq_buffer, otherwise
1529  * it returns NULL.
1530  **/
1531 static struct hbq_dmabuf *
1532 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
1533 {
1534         struct lpfc_dmabuf *d_buf;
1535         struct hbq_dmabuf *hbq_buf;
1536         uint32_t hbqno;
1537
1538         hbqno = tag >> 16;
1539         if (hbqno >= LPFC_MAX_HBQS)
1540                 return NULL;
1541
1542         spin_lock_irq(&phba->hbalock);
1543         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
1544                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1545                 if (hbq_buf->tag == tag) {
1546                         spin_unlock_irq(&phba->hbalock);
1547                         return hbq_buf;
1548                 }
1549         }
1550         spin_unlock_irq(&phba->hbalock);
1551         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
1552                         "1803 Bad hbq tag. Data: x%x x%x\n",
1553                         tag, phba->hbqs[tag >> 16].buffer_count);
1554         return NULL;
1555 }
1556
1557 /**
1558  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
1559  * @phba: Pointer to HBA context object.
1560  * @hbq_buffer: Pointer to HBQ buffer.
1561  *
1562  * This function is called with the hbalock held. It gives back
1563  * the hbq buffer to the firmware. If the HBQ does not have space to
1564  * post the buffer, it will free the buffer.
1565  **/
1566 void
1567 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1568 {
1569         uint32_t hbqno;
1570
1571         if (hbq_buffer) {
1572                 hbqno = hbq_buffer->tag >> 16;
1573                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1574                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1575         }
1576 }
1577
1578 /**
1579  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox command
1580  * @mbxCommand: mailbox command code.
1581  *
1582  * This function is called by the mailbox event handler function to verify
1583  * that the completed mailbox command is a legitimate mailbox command. If the
1584  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
1585  * and the mailbox event handler will take the HBA offline.
1586  **/
1587 static int
1588 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1589 {
1590         uint8_t ret;
1591
1592         switch (mbxCommand) {
1593         case MBX_LOAD_SM:
1594         case MBX_READ_NV:
1595         case MBX_WRITE_NV:
1596         case MBX_WRITE_VPARMS:
1597         case MBX_RUN_BIU_DIAG:
1598         case MBX_INIT_LINK:
1599         case MBX_DOWN_LINK:
1600         case MBX_CONFIG_LINK:
1601         case MBX_CONFIG_RING:
1602         case MBX_RESET_RING:
1603         case MBX_READ_CONFIG:
1604         case MBX_READ_RCONFIG:
1605         case MBX_READ_SPARM:
1606         case MBX_READ_STATUS:
1607         case MBX_READ_RPI:
1608         case MBX_READ_XRI:
1609         case MBX_READ_REV:
1610         case MBX_READ_LNK_STAT:
1611         case MBX_REG_LOGIN:
1612         case MBX_UNREG_LOGIN:
1613         case MBX_READ_LA:
1614         case MBX_CLEAR_LA:
1615         case MBX_DUMP_MEMORY:
1616         case MBX_DUMP_CONTEXT:
1617         case MBX_RUN_DIAGS:
1618         case MBX_RESTART:
1619         case MBX_UPDATE_CFG:
1620         case MBX_DOWN_LOAD:
1621         case MBX_DEL_LD_ENTRY:
1622         case MBX_RUN_PROGRAM:
1623         case MBX_SET_MASK:
1624         case MBX_SET_VARIABLE:
1625         case MBX_UNREG_D_ID:
1626         case MBX_KILL_BOARD:
1627         case MBX_CONFIG_FARP:
1628         case MBX_BEACON:
1629         case MBX_LOAD_AREA:
1630         case MBX_RUN_BIU_DIAG64:
1631         case MBX_CONFIG_PORT:
1632         case MBX_READ_SPARM64:
1633         case MBX_READ_RPI64:
1634         case MBX_REG_LOGIN64:
1635         case MBX_READ_LA64:
1636         case MBX_WRITE_WWN:
1637         case MBX_SET_DEBUG:
1638         case MBX_LOAD_EXP_ROM:
1639         case MBX_ASYNCEVT_ENABLE:
1640         case MBX_REG_VPI:
1641         case MBX_UNREG_VPI:
1642         case MBX_HEARTBEAT:
1643         case MBX_PORT_CAPABILITIES:
1644         case MBX_PORT_IOV_CONTROL:
1645         case MBX_SLI4_CONFIG:
1646         case MBX_SLI4_REQ_FTRS:
1647         case MBX_REG_FCFI:
1648         case MBX_UNREG_FCFI:
1649         case MBX_REG_VFI:
1650         case MBX_UNREG_VFI:
1651         case MBX_INIT_VPI:
1652         case MBX_INIT_VFI:
1653         case MBX_RESUME_RPI:
1654                 ret = mbxCommand;
1655                 break;
1656         default:
1657                 ret = MBX_SHUTDOWN;
1658                 break;
1659         }
1660         return ret;
1661 }
1662
1663 /**
1664  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
1665  * @phba: Pointer to HBA context object.
1666  * @pmboxq: Pointer to mailbox command.
1667  *
1668  * This is the completion handler function for mailbox commands issued from
1669  * lpfc_sli_issue_mbox_wait function. This function is called by the
1670  * mailbox event handler function with no lock held. This function
1671  * will wake up thread waiting on the wait queue pointed by context1
1672  * of the mailbox.
1673  **/
1674 void
1675 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1676 {
1677         wait_queue_head_t *pdone_q;
1678         unsigned long drvr_flag;
1679
1680         /*
1681          * If pdone_q is NULL, the waiting thread gave up and
1682          * continued running.
1683          */
1684         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
1685         spin_lock_irqsave(&phba->hbalock, drvr_flag);
1686         pdone_q = (wait_queue_head_t *) pmboxq->context1;
1687         if (pdone_q)
1688                 wake_up_interruptible(pdone_q);
1689         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1690         return;
1691 }
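
/*
 * Editor's sketch of the waiting side of this handshake (illustrative; in
 * the driver the waiter is lpfc_sli_issue_mbox_wait).  The issuer parks a
 * wait queue head in context1, sleeps until the handler above sets
 * LPFC_MBX_WAKE, and the hbalock around the context1 access closes the
 * race with a waiter that timed out and already moved on.
 */
static int
example_issue_mbox_and_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			    unsigned long timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int retval;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	pmboxq->context1 = &done_q;
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				timeout * HZ);

	/* simplified: the real waiter also handles cleanup on timeout */
	return (pmboxq->mbox_flag & LPFC_MBX_WAKE) ? 0 : -ETIMEDOUT;
}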
1692
1693
1694 /**
1695  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
1696  * @phba: Pointer to HBA context object.
1697  * @pmb: Pointer to mailbox object.
1698  *
1699  * This function is the default mailbox completion handler. It
1700  * frees the memory resources associated with the completed mailbox
1701  * command. If the completed command is a REG_LOGIN mailbox command,
1702  * this function will issue a UREG_LOGIN to re-claim the RPI.
1703  **/
1704 void
1705 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1706 {
1707         struct lpfc_dmabuf *mp;
1708         uint16_t rpi, vpi;
1709         int rc;
1710
1711         mp = (struct lpfc_dmabuf *) (pmb->context1);
1712
1713         if (mp) {
1714                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1715                 kfree(mp);
1716         }
1717
1718         if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1719             (phba->sli_rev == LPFC_SLI_REV4))
1720                 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1721
1722         /*
1723          * If a REG_LOGIN succeeded after the node was destroyed or the
1724          * node is in re-discovery, the driver needs to clean up the RPI.
1725          */
1726         if (!(phba->pport->load_flag & FC_UNLOADING) &&
1727             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1728             !pmb->u.mb.mbxStatus) {
1729                 rpi = pmb->u.mb.un.varWords[0];
1730                 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1731                 lpfc_unreg_login(phba, vpi, rpi, pmb);
1732                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1733                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1734                 if (rc != MBX_NOT_FINISHED)
1735                         return;
1736         }
1737
1738         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1739                 lpfc_sli4_mbox_cmd_free(phba, pmb);
1740         else
1741                 mempool_free(pmb, phba->mbox_mem_pool);
1742 }
1743
1744 /**
1745  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
1746  * @phba: Pointer to HBA context object.
1747  *
1748  * This function is called with no lock held. This function processes all
1749  * the completed mailbox commands and gives them to the upper layers. The
1750  * interrupt service routine processes mailbox completion interrupts and adds
1751  * completed mailbox commands to the mboxq_cmpl queue and signals the worker
1752  * thread. The worker thread calls lpfc_sli_handle_mb_event, which returns the
1753  * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
1754  * function returns the mailbox commands to the upper layer by calling the
1755  * completion handler function of each mailbox.
1756  **/
1757 int
1758 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1759 {
1760         MAILBOX_t *pmbox;
1761         LPFC_MBOXQ_t *pmb;
1762         int rc;
1763         LIST_HEAD(cmplq);
1764
1765         phba->sli.slistat.mbox_event++;
1766
1767         /* Get all completed mailbox buffers into the cmplq */
1768         spin_lock_irq(&phba->hbalock);
1769         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
1770         spin_unlock_irq(&phba->hbalock);
1771
1772         /* Get a Mailbox buffer to set up mailbox commands for callback */
1773         do {
1774                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
1775                 if (pmb == NULL)
1776                         break;
1777
1778                 pmbox = &pmb->u.mb;
1779
1780                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1781                         if (pmb->vport) {
1782                                 lpfc_debugfs_disc_trc(pmb->vport,
1783                                         LPFC_DISC_TRC_MBOX_VPORT,
1784                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
1785                                         (uint32_t)pmbox->mbxCommand,
1786                                         pmbox->un.varWords[0],
1787                                         pmbox->un.varWords[1]);
1788                         }
1789                         else {
1790                                 lpfc_debugfs_disc_trc(phba->pport,
1791                                         LPFC_DISC_TRC_MBOX,
1792                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
1793                                         (uint32_t)pmbox->mbxCommand,
1794                                         pmbox->un.varWords[0],
1795                                         pmbox->un.varWords[1]);
1796                         }
1797                 }
1798
1799                 /*
1800                  * It is a fatal error if an unknown mailbox command completes.
1801                  */
1802                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
1803                     MBX_SHUTDOWN) {
1804                         /* Unknown mailbox command compl */
1805                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1806                                         "(%d):0323 Unknown Mailbox command "
1807                                         "x%x (x%x) Cmpl\n",
1808                                         pmb->vport ? pmb->vport->vpi : 0,
1809                                         pmbox->mbxCommand,
1810                                         lpfc_sli4_mbox_opcode_get(phba, pmb));
1811                         phba->link_state = LPFC_HBA_ERROR;
1812                         phba->work_hs = HS_FFER3;
1813                         lpfc_handle_eratt(phba);
1814                         continue;
1815                 }
1816
1817                 if (pmbox->mbxStatus) {
1818                         phba->sli.slistat.mbox_stat_err++;
1819                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
1820                                 /* Mbox cmd cmpl error - RETRYing */
1821                                 lpfc_printf_log(phba, KERN_INFO,
1822                                                 LOG_MBOX | LOG_SLI,
1823                                                 "(%d):0305 Mbox cmd cmpl "
1824                                                 "error - RETRYing Data: x%x "
1825                                                 "(x%x) x%x x%x x%x\n",
1826                                                 pmb->vport ? pmb->vport->vpi : 0,
1827                                                 pmbox->mbxCommand,
1828                                                 lpfc_sli4_mbox_opcode_get(phba,
1829                                                                           pmb),
1830                                                 pmbox->mbxStatus,
1831                                                 pmbox->un.varWords[0],
1832                                                 pmb->vport ? pmb->vport->port_state : 0);
1833                                 pmbox->mbxStatus = 0;
1834                                 pmbox->mbxOwner = OWN_HOST;
1835                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1836                                 if (rc != MBX_NOT_FINISHED)
1837                                         continue;
1838                         }
1839                 }
1840
1841                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
1842                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
1843                                 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
1844                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
1845                                 pmb->vport ? pmb->vport->vpi : 0,
1846                                 pmbox->mbxCommand,
1847                                 lpfc_sli4_mbox_opcode_get(phba, pmb),
1848                                 pmb->mbox_cmpl,
1849                                 *((uint32_t *) pmbox),
1850                                 pmbox->un.varWords[0],
1851                                 pmbox->un.varWords[1],
1852                                 pmbox->un.varWords[2],
1853                                 pmbox->un.varWords[3],
1854                                 pmbox->un.varWords[4],
1855                                 pmbox->un.varWords[5],
1856                                 pmbox->un.varWords[6],
1857                                 pmbox->un.varWords[7]);
1858
1859                 if (pmb->mbox_cmpl)
1860                         pmb->mbox_cmpl(phba, pmb);
1861         } while (1);
1862         return 0;
1863 }
1864
1865 /**
1866  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
1867  * @phba: Pointer to HBA context object.
1868  * @pring: Pointer to driver SLI ring object.
1869  * @tag: buffer tag.
1870  *
1871  * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
1872  * is set in the tag, the buffer was posted for a particular exchange and
1873  * the function returns the buffer without replacing it.
1874  * If the buffer is for unsolicited ELS or CT traffic, this function
1875  * returns the buffer and also posts another buffer to the firmware.
1876  **/
1877 static struct lpfc_dmabuf *
1878 lpfc_sli_get_buff(struct lpfc_hba *phba,
1879                   struct lpfc_sli_ring *pring,
1880                   uint32_t tag)
1881 {
1882         struct hbq_dmabuf *hbq_entry;
1883
1884         if (tag & QUE_BUFTAG_BIT)
1885                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
1886         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
1887         if (!hbq_entry)
1888                 return NULL;
1889         return &hbq_entry->dbuf;
1890 }
1891
1892 /**
1893  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1894  * @phba: Pointer to HBA context object.
1895  * @pring: Pointer to driver SLI ring object.
1896  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1897  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1898  * @fch_type: the type for the first frame of the sequence.
1899  *
1900  * This function is called with no lock held. This function uses the r_ctl and
1901  * type of the received sequence to find the correct callback function to call
1902  * to process the sequence.
1903  **/
1904 static int
1905 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1906                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1907                          uint32_t fch_type)
1908 {
1909         int i;
1910
1911         /* Unsolicited Responses */
1912         if (pring->prt[0].profile) {
1913                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1914                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1915                                                                         saveq);
1916                 return 1;
1917         }
1918         /* We must search, based on rctl / type
1919            for the right routine */
1920         for (i = 0; i < pring->num_mask; i++) {
1921                 if ((pring->prt[i].rctl == fch_r_ctl) &&
1922                     (pring->prt[i].type == fch_type)) {
1923                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1924                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1925                                                 (phba, pring, saveq);
1926                         return 1;
1927                 }
1928         }
1929         return 0;
1930 }
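
/*
 * Editor's sketch: the rctl/type match above only works because ring setup
 * registered a handler per (rctl, type) pair in pring->prt[].  Roughly how
 * the ELS entry is wired (field and constant names grounded in this file;
 * the handler name comes from the driver's ELS module, and the actual
 * setup site lives elsewhere in the driver):
 */
static void
example_register_els_unsol_handler(struct lpfc_sli_ring *pring)
{
	pring->num_mask = 1;			/* one (rctl, type) pair */
	pring->prt[0].profile = 0;		/* 0 => match on rctl/type */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
}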
1931
1932 /**
1933  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
1934  * @phba: Pointer to HBA context object.
1935  * @pring: Pointer to driver SLI ring object.
1936  * @saveq: Pointer to the unsolicited iocb.
1937  *
1938  * This function is called with no lock held by the ring event handler
1939  * when there is an unsolicited iocb posted to the response ring by the
1940  * firmware. This function gets the buffer associated with the iocbs
1941  * and calls the event handler for the ring. This function handles both
1942  * qring buffers and hbq buffers.
1943  * When the function returns 1, the caller can free the iocb object;
1944  * otherwise, upper layer functions will free the iocb objects.
1945  **/
1946 static int
1947 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1948                             struct lpfc_iocbq *saveq)
1949 {
1950         IOCB_t           * irsp;
1951         WORD5            * w5p;
1952         uint32_t           Rctl, Type;
1953         uint32_t           match;
1954         struct lpfc_iocbq *iocbq;
1955         struct lpfc_dmabuf *dmzbuf;
1956
1957         match = 0;
1958         irsp = &(saveq->iocb);
1959
1960         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
1961                 if (pring->lpfc_sli_rcv_async_status)
1962                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
1963                 else
1964                         lpfc_printf_log(phba,
1965                                         KERN_WARNING,
1966                                         LOG_SLI,
1967                                         "0316 Ring %d handler: unexpected "
1968                                         "ASYNC_STATUS iocb received evt_code "
1969                                         "0x%x\n",
1970                                         pring->ringno,
1971                                         irsp->un.asyncstat.evt_code);
1972                 return 1;
1973         }
1974
1975         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
1976                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
1977                 if (irsp->ulpBdeCount > 0) {
1978                         dmzbuf = lpfc_sli_get_buff(phba, pring,
1979                                         irsp->un.ulpWord[3]);
1980                         lpfc_in_buf_free(phba, dmzbuf);
1981                 }
1982
1983                 if (irsp->ulpBdeCount > 1) {
1984                         dmzbuf = lpfc_sli_get_buff(phba, pring,
1985                                         irsp->unsli3.sli3Words[3]);
1986                         lpfc_in_buf_free(phba, dmzbuf);
1987                 }
1988
1989                 if (irsp->ulpBdeCount > 2) {
1990                         dmzbuf = lpfc_sli_get_buff(phba, pring,
1991                                 irsp->unsli3.sli3Words[7]);
1992                         lpfc_in_buf_free(phba, dmzbuf);
1993                 }
1994
1995                 return 1;
1996         }
1997
1998         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
1999                 if (irsp->ulpBdeCount != 0) {
2000                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
2001                                                 irsp->un.ulpWord[3]);
2002                         if (!saveq->context2)
2003                                 lpfc_printf_log(phba,
2004                                         KERN_ERR,
2005                                         LOG_SLI,
2006                                         "0341 Ring %d Cannot find buffer for "
2007                                         "an unsolicited iocb. tag 0x%x\n",
2008                                         pring->ringno,
2009                                         irsp->un.ulpWord[3]);
2010                 }
2011                 if (irsp->ulpBdeCount == 2) {
2012                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
2013                                                 irsp->unsli3.sli3Words[7]);
2014                         if (!saveq->context3)
2015                                 lpfc_printf_log(phba,
2016                                         KERN_ERR,
2017                                         LOG_SLI,
2018                                         "0342 Ring %d Cannot find buffer for an"
2019                                         " unsolicited iocb. tag 0x%x\n",
2020                                         pring->ringno,
2021                                         irsp->unsli3.sli3Words[7]);
2022                 }
2023                 list_for_each_entry(iocbq, &saveq->list, list) {
2024                         irsp = &(iocbq->iocb);
2025                         if (irsp->ulpBdeCount != 0) {
2026                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2027                                                         irsp->un.ulpWord[3]);
2028                                 if (!iocbq->context2)
2029                                         lpfc_printf_log(phba,
2030                                                 KERN_ERR,
2031                                                 LOG_SLI,
2032                                                 "0343 Ring %d Cannot find "
2033                                                 "buffer for an unsolicited iocb"
2034                                                 ". tag 0x%x\n", pring->ringno,
2035                                                 irsp->un.ulpWord[3]);
2036                         }
2037                         if (irsp->ulpBdeCount == 2) {
2038                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2039                                                 irsp->unsli3.sli3Words[7]);
2040                                 if (!iocbq->context3)
2041                                         lpfc_printf_log(phba,
2042                                                 KERN_ERR,
2043                                                 LOG_SLI,
2044                                                 "0344 Ring %d Cannot find "
2045                                                 "buffer for an unsolicited "
2046                                                 "iocb. tag 0x%x\n",
2047                                                 pring->ringno,
2048                                                 irsp->unsli3.sli3Words[7]);
2049                         }
2050                 }
2051         }
2052         if (irsp->ulpBdeCount != 0 &&
2053             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2054              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2055                 int found = 0;
2056
2057                 /* search continue save q for same XRI */
2058                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2059                         if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
2060                                 list_add_tail(&saveq->list, &iocbq->list);
2061                                 found = 1;
2062                                 break;
2063                         }
2064                 }
2065                 if (!found)
2066                         list_add_tail(&saveq->clist,
2067                                       &pring->iocb_continue_saveq);
2068                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2069                         list_del_init(&iocbq->clist);
2070                         saveq = iocbq;
2071                         irsp = &(saveq->iocb);
2072                 } else
2073                         return 0;
2074         }
2075         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2076             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2077             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2078                 Rctl = FC_RCTL_ELS_REQ;
2079                 Type = FC_TYPE_ELS;
2080         } else {
2081                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2082                 Rctl = w5p->hcsw.Rctl;
2083                 Type = w5p->hcsw.Type;
2084
2085                 /* Firmware Workaround */
2086                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2087                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2088                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2089                         Rctl = FC_RCTL_ELS_REQ;
2090                         Type = FC_TYPE_ELS;
2091                         w5p->hcsw.Rctl = Rctl;
2092                         w5p->hcsw.Type = Type;
2093                 }
2094         }
2095
2096         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2097                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2098                                 "0313 Ring %d handler: unexpected Rctl x%x "
2099                                 "Type x%x received\n",
2100                                 pring->ringno, Rctl, Type);
2101
2102         return 1;
2103 }
2104
2105 /**
2106  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2107  * @phba: Pointer to HBA context object.
2108  * @pring: Pointer to driver SLI ring object.
2109  * @prspiocb: Pointer to response iocb object.
2110  *
2111  * This function looks up the iocb_lookup table to get the command iocb
2112  * corresponding to the given response iocb using the iotag of the
2113  * response iocb. This function is called with the hbalock held.
2114  * This function returns the command iocb object if it finds the command
2115  * iocb else returns NULL.
2116  **/
2117 static struct lpfc_iocbq *
2118 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2119                       struct lpfc_sli_ring *pring,
2120                       struct lpfc_iocbq *prspiocb)
2121 {
2122         struct lpfc_iocbq *cmd_iocb = NULL;
2123         uint16_t iotag;
2124
2125         iotag = prspiocb->iocb.ulpIoTag;
2126
2127         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2128                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2129                 list_del_init(&cmd_iocb->list);
2130                 pring->txcmplq_cnt--;
2131                 return cmd_iocb;
2132         }
2133
2134         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2135                         "0317 iotag x%x is out of "
2136                         "range: max iotag x%x wd0 x%x\n",
2137                         iotag, phba->sli.last_iotag,
2138                         *(((uint32_t *) &prspiocb->iocb) + 7));
2139         return NULL;
2140 }
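
/*
 * Editor's sketch: the lookup above is O(1) because an iotag is an index
 * into sli.iocbq_lookup.  When a command iocb is prepared it is assigned
 * the next iotag and stored at that slot; the driver's real allocator
 * (lpfc_sli_next_iotag) also locks and grows the array, omitted here.
 */
static uint16_t
example_assign_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	uint16_t iotag = phba->sli.last_iotag + 1;	/* assumes headroom */

	phba->sli.iocbq_lookup[iotag] = iocbq;
	phba->sli.last_iotag = iotag;
	iocbq->iotag = iotag;	/* copied into ulpIoTag when issued */
	return iotag;
}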
2141
2142 /**
2143  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2144  * @phba: Pointer to HBA context object.
2145  * @pring: Pointer to driver SLI ring object.
2146  * @iotag: IOCB tag.
2147  *
2148  * This function looks up the iocb_lookup table to get the command iocb
2149  * corresponding to the given iotag. This function is called with the
2150  * hbalock held.
2151  * This function returns the command iocb object if it finds the command
2152  * iocb else returns NULL.
2153  **/
2154 static struct lpfc_iocbq *
2155 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2156                              struct lpfc_sli_ring *pring, uint16_t iotag)
2157 {
2158         struct lpfc_iocbq *cmd_iocb;
2159
2160         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2161                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2162                 list_del_init(&cmd_iocb->list);
2163                 pring->txcmplq_cnt--;
2164                 return cmd_iocb;
2165         }
2166
2167         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2168                         "0372 iotag x%x is out of range: max iotag (x%x)\n",
2169                         iotag, phba->sli.last_iotag);
2170         return NULL;
2171 }
2172
2173 /**
2174  * lpfc_sli_process_sol_iocb - process solicited iocb completion
2175  * @phba: Pointer to HBA context object.
2176  * @pring: Pointer to driver SLI ring object.
2177  * @saveq: Pointer to the response iocb to be processed.
2178  *
2179  * This function is called by the ring event handler for non-fcp
2180  * rings when there is a new response iocb in the response ring.
2181  * The caller is not required to hold any locks. This function
2182  * gets the command iocb associated with the response iocb and
2183  * calls the completion handler for the command iocb. If there
2184  * is no completion handler, the function will free the resources
2185  * associated with the command iocb. If the response iocb is for
2186  * an already aborted command iocb, the status of the completion
2187  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2188  * This function always returns 1.
2189  **/
2190 static int
2191 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2192                           struct lpfc_iocbq *saveq)
2193 {
2194         struct lpfc_iocbq *cmdiocbp;
2195         int rc = 1;
2196         unsigned long iflag;
2197
2198         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2199         spin_lock_irqsave(&phba->hbalock, iflag);
2200         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2201         spin_unlock_irqrestore(&phba->hbalock, iflag);
2202
2203         if (cmdiocbp) {
2204                 if (cmdiocbp->iocb_cmpl) {
2205                         /*
2206                          * If an ELS command failed send an event to mgmt
2207                          * application.
2208                          */
2209                         if (saveq->iocb.ulpStatus &&
2210                              (pring->ringno == LPFC_ELS_RING) &&
2211                              (cmdiocbp->iocb.ulpCommand ==
2212                                 CMD_ELS_REQUEST64_CR))
2213                                 lpfc_send_els_failure_event(phba,
2214                                         cmdiocbp, saveq);
2215
2216                         /*
2217                          * Post all ELS completions to the worker thread.
2218                          * All other are passed to the completion callback.
2219                          */
2220                         if (pring->ringno == LPFC_ELS_RING) {
2221                                 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
2222                                         cmdiocbp->iocb_flag &=
2223                                                 ~LPFC_DRIVER_ABORTED;
2224                                         saveq->iocb.ulpStatus =
2225                                                 IOSTAT_LOCAL_REJECT;
2226                                         saveq->iocb.un.ulpWord[4] =
2227                                                 IOERR_SLI_ABORTED;
2228
2229                                         /* Firmware could still be in progress
2230                                          * of DMAing payload, so don't free data
2231                                          * buffer till after a hbeat.
2232                                          */
2233                                         saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2234                                 }
2235                         }
2236                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2237                 } else
2238                         lpfc_sli_release_iocbq(phba, cmdiocbp);
2239         } else {
2240                 /*
2241                  * Unknown initiating command based on the response iotag.
2242                  * This could be the case on the ELS ring because of
2243                  * lpfc_els_abort().
2244                  */
2245                 if (pring->ringno != LPFC_ELS_RING) {
2246                         /*
2247                          * Ring <ringno> handler: unexpected completion IoTag
2248                          * <IoTag>
2249                          */
2250                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2251                                          "0322 Ring %d handler: "
2252                                          "unexpected completion IoTag x%x "
2253                                          "Data: x%x x%x x%x x%x\n",
2254                                          pring->ringno,
2255                                          saveq->iocb.ulpIoTag,
2256                                          saveq->iocb.ulpStatus,
2257                                          saveq->iocb.un.ulpWord[4],
2258                                          saveq->iocb.ulpCommand,
2259                                          saveq->iocb.ulpContext);
2260                 }
2261         }
2262
2263         return rc;
2264 }
2265
2266 /**
2267  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2268  * @phba: Pointer to HBA context object.
2269  * @pring: Pointer to driver SLI ring object.
2270  *
2271  * This function is called from the iocb ring event handlers when the
2272  * put pointer is ahead of the get pointer for a ring. This function signals
2273  * an error attention condition to the worker thread, and the worker
2274  * thread will transition the HBA to the offline state.
2275  **/
2276 static void
2277 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2278 {
2279         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2280         /*
2281          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2282          * rsp ring <portRspMax>
2283          */
2284         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2285                         "0312 Ring %d handler: portRspPut %d "
2286                         "is bigger than rsp ring %d\n",
2287                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
2288                         pring->numRiocb);
2289
2290         phba->link_state = LPFC_HBA_ERROR;
2291
2292         /*
2293          * All error attention handlers are posted to
2294          * worker thread
2295          */
2296         phba->work_ha |= HA_ERATT;
2297         phba->work_hs = HS_FFER3;
2298
2299         lpfc_worker_wake_up(phba);
2300
2301         return;
2302 }
2303
2304 /**
2305  * lpfc_poll_eratt - Error attention polling timer timeout handler
2306  * @ptr: Pointer to address of HBA context object.
2307  *
2308  * This function is invoked by the Error Attention polling timer when the
2309  * timer times out. It will check the SLI Error Attention register for
2310  * possible attention events. If so, it will post an Error Attention event
2311  * and wake up worker thread to process it. Otherwise, it will set up the
2312  * Error Attention polling timer for the next poll.
2313  **/
2314 void lpfc_poll_eratt(unsigned long ptr)
2315 {
2316         struct lpfc_hba *phba;
2317         uint32_t eratt = 0;
2318
2319         phba = (struct lpfc_hba *)ptr;
2320
2321         /* Check chip HA register for error event */
2322         eratt = lpfc_sli_check_eratt(phba);
2323
2324         if (eratt)
2325                 /* Tell the worker thread there is work to do */
2326                 lpfc_worker_wake_up(phba);
2327         else
2328                 /* Restart the timer for next eratt poll */
2329                 mod_timer(&phba->eratt_poll, jiffies +
2330                                         HZ * LPFC_ERATT_POLL_INTERVAL);
2331         return;
2332 }
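
/*
 * Editor's sketch: for this callback to fire, the timer must have been
 * initialized with the phba as its data cookie and armed once during
 * driver setup; thereafter the callback re-arms itself (mod_timer above)
 * whenever no error attention is pending.  Illustrative init, using the
 * timer API of this kernel era:
 */
static void
example_start_eratt_poll(struct lpfc_hba *phba)
{
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long)phba;
	mod_timer(&phba->eratt_poll,
		  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
}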
2333
2334
2335 /**
2336  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2337  * @phba: Pointer to HBA context object.
2338  * @pring: Pointer to driver SLI ring object.
2339  * @mask: Host attention register mask for this ring.
2340  *
2341  * This function is called from the interrupt context when there is a ring
2342  * event for the fcp ring. The caller does not hold any lock.
2343  * The function processes each response iocb in the response ring until it
2344  * finds an iocb with the LE bit set and chains all the iocbs up to the iocb
2345  * with the LE bit set. The function will call the completion handler of the
2346  * command iocb if the response iocb indicates a completion for a command
2347  * iocb or an abort completion. The function will call the
2348  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2349  * This routine presumes LPFC_FCP_RING handling and doesn't bother
2350  * to check it explicitly.
2351  **/
2352 int
2353 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2354                                 struct lpfc_sli_ring *pring, uint32_t mask)
2355 {
2356         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2357         IOCB_t *irsp = NULL;
2358         IOCB_t *entry = NULL;
2359         struct lpfc_iocbq *cmdiocbq = NULL;
2360         struct lpfc_iocbq rspiocbq;
2361         uint32_t status;
2362         uint32_t portRspPut, portRspMax;
2363         int rc = 1;
2364         lpfc_iocb_type type;
2365         unsigned long iflag;
2366         uint32_t rsp_cmpl = 0;
2367
2368         spin_lock_irqsave(&phba->hbalock, iflag);
2369         pring->stats.iocb_event++;
2370
2371         /*
2372          * The next available response entry should never exceed the maximum
2373          * entries.  If it does, treat it as an adapter hardware error.
2374          */
2375         portRspMax = pring->numRiocb;
2376         portRspPut = le32_to_cpu(pgp->rspPutInx);
2377         if (unlikely(portRspPut >= portRspMax)) {
2378                 lpfc_sli_rsp_pointers_error(phba, pring);
2379                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2380                 return 1;
2381         }
2382         if (phba->fcp_ring_in_use) {
2383                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2384                 return 1;
2385         } else
2386                 phba->fcp_ring_in_use = 1;
2387
2388         rmb();
2389         while (pring->rspidx != portRspPut) {
2390                 /*
2391                  * Fetch an entry off the ring and copy it into a local data
2392                  * structure.  The copy involves a byte-swap since the
2393                  * network byte order and pci byte orders are different.
2394                  */
2395                 entry = lpfc_resp_iocb(phba, pring);
2396                 phba->last_completion_time = jiffies;
2397
2398                 if (++pring->rspidx >= portRspMax)
2399                         pring->rspidx = 0;
2400
2401                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2402                                       (uint32_t *) &rspiocbq.iocb,
2403                                       phba->iocb_rsp_size);
2404                 INIT_LIST_HEAD(&(rspiocbq.list));
2405                 irsp = &rspiocbq.iocb;
2406
2407                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2408                 pring->stats.iocb_rsp++;
2409                 rsp_cmpl++;
2410
2411                 if (unlikely(irsp->ulpStatus)) {
2412                         /*
2413                          * If resource errors reported from HBA, reduce
2414                          * queuedepths of the SCSI device.
2415                          */
2416                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2417                                 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2418                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2419                                 phba->lpfc_rampdown_queue_depth(phba);
2420                                 spin_lock_irqsave(&phba->hbalock, iflag);
2421                         }
2422
2423                         /* Rsp ring <ringno> error: IOCB */
2424                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2425                                         "0336 Rsp Ring %d error: IOCB Data: "
2426                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2427                                         pring->ringno,
2428                                         irsp->un.ulpWord[0],
2429                                         irsp->un.ulpWord[1],
2430                                         irsp->un.ulpWord[2],
2431                                         irsp->un.ulpWord[3],
2432                                         irsp->un.ulpWord[4],
2433                                         irsp->un.ulpWord[5],
2434                                         *(uint32_t *)&irsp->un1,
2435                                         *((uint32_t *)&irsp->un1 + 1));
2436                 }
2437
2438                 switch (type) {
2439                 case LPFC_ABORT_IOCB:
2440                 case LPFC_SOL_IOCB:
2441                         /*
2442                          * Idle exchange closed via ABTS from port.  No iocb
2443                          * resources need to be recovered.
2444                          */
2445                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2446                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2447                                                 "0333 IOCB cmd 0x%x"
2448                                                 " processed. Skipping"
2449                                                 " completion\n",
2450                                                 irsp->ulpCommand);
2451                                 break;
2452                         }
2453
2454                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2455                                                          &rspiocbq);
2456                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
2457                                 spin_unlock_irqrestore(&phba->hbalock,
2458                                                        iflag);
2459                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2460                                                       &rspiocbq);
2461                                 spin_lock_irqsave(&phba->hbalock,
2462                                                   iflag);
2463                         }
2464                         break;
2465                 case LPFC_UNSOL_IOCB:
2466                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2467                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2468                         spin_lock_irqsave(&phba->hbalock, iflag);
2469                         break;
2470                 default:
2471                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2472                                 char adaptermsg[LPFC_MAX_ADPTMSG];
2473                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2474                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2475                                        MAX_MSG_DATA);
2476                                 dev_warn(&((phba->pcidev)->dev),
2477                                          "lpfc%d: %s\n",
2478                                          phba->brd_no, adaptermsg);
2479                         } else {
2480                                 /* Unknown IOCB command */
2481                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2482                                                 "0334 Unknown IOCB command "
2483                                                 "Data: x%x, x%x x%x x%x x%x\n",
2484                                                 type, irsp->ulpCommand,
2485                                                 irsp->ulpStatus,
2486                                                 irsp->ulpIoTag,
2487                                                 irsp->ulpContext);
2488                         }
2489                         break;
2490                 }
2491
2492                 /*
2493                  * The response IOCB has been processed.  Update the ring
2494                  * pointer in SLIM.  If the port response put pointer has not
2495                  * been updated, sync the pgp->rspPutInx and fetch the new port
2496                  * response put pointer.
2497                  */
2498                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2499
2500                 if (pring->rspidx == portRspPut)
2501                         portRspPut = le32_to_cpu(pgp->rspPutInx);
2502         }
2503
2504         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
2505                 pring->stats.iocb_rsp_full++;
2506                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2507                 writel(status, phba->CAregaddr);
2508                 readl(phba->CAregaddr);
2509         }
2510         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2511                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2512                 pring->stats.iocb_cmd_empty++;
2513
2514                 /* Force update of the local copy of cmdGetInx */
2515                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2516                 lpfc_sli_resume_iocb(phba, pring);
2517
2518                 if ((pring->lpfc_sli_cmd_available))
2519                         (pring->lpfc_sli_cmd_available) (phba, pring);
2520
2521         }
2522
2523         phba->fcp_ring_in_use = 0;
2524         spin_unlock_irqrestore(&phba->hbalock, iflag);
2525         return rc;
2526 }
2527
2528 /**
2529  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2530  * @phba: Pointer to HBA context object.
2531  * @pring: Pointer to driver SLI ring object.
2532  * @rspiocbp: Pointer to driver response IOCB object.
2533  *
2534  * This function is called from the worker thread when there is a slow-path
2535  * response IOCB to process. This function chains all the response iocbs until
2536  * seeing the iocb with the LE bit set. The function will call
2537  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2538  * completion of a command iocb. The function will call the
2539  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2540  * The function frees the resources or calls the completion handler if this
2541  * iocb is an abort completion. The function returns NULL when the response
2542  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2543  * this function shall chain the iocb on to the iocb_continueq and return the
2544  * response iocb passed in.
2545  **/
2546 static struct lpfc_iocbq *
2547 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2548                         struct lpfc_iocbq *rspiocbp)
2549 {
2550         struct lpfc_iocbq *saveq;
2551         struct lpfc_iocbq *cmdiocbp;
2552         struct lpfc_iocbq *next_iocb;
2553         IOCB_t *irsp = NULL;
2554         uint32_t free_saveq;
2555         uint8_t iocb_cmd_type;
2556         lpfc_iocb_type type;
2557         unsigned long iflag;
2558         int rc;
2559
2560         spin_lock_irqsave(&phba->hbalock, iflag);
2561         /* First add the response iocb to the continueq list */
2562         list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2563         pring->iocb_continueq_cnt++;
2564
2565         /* Now, determine whether the list is completed for processing */
2566         irsp = &rspiocbp->iocb;
2567         if (irsp->ulpLe) {
2568                 /*
2569                  * By default, the driver expects to free all resources
2570                  * associated with this iocb completion.
2571                  */
2572                 free_saveq = 1;
2573                 saveq = list_get_first(&pring->iocb_continueq,
2574                                        struct lpfc_iocbq, list);
2575                 irsp = &(saveq->iocb);
2576                 list_del_init(&pring->iocb_continueq);
2577                 pring->iocb_continueq_cnt = 0;
2578
2579                 pring->stats.iocb_rsp++;
2580
2581                 /*
2582                  * If resource errors reported from HBA, reduce
2583                  * queuedepths of the SCSI device.
2584                  */
2585                 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2586                     (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2587                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2588                         phba->lpfc_rampdown_queue_depth(phba);
2589                         spin_lock_irqsave(&phba->hbalock, iflag);
2590                 }
2591
2592                 if (irsp->ulpStatus) {
2593                         /* Rsp ring <ringno> error: IOCB */
2594                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2595                                         "0328 Rsp Ring %d error: "
2596                                         "IOCB Data: "
2597                                         "x%x x%x x%x x%x "
2598                                         "x%x x%x x%x x%x "
2599                                         "x%x x%x x%x x%x "
2600                                         "x%x x%x x%x x%x\n",
2601                                         pring->ringno,
2602                                         irsp->un.ulpWord[0],
2603                                         irsp->un.ulpWord[1],
2604                                         irsp->un.ulpWord[2],
2605                                         irsp->un.ulpWord[3],
2606                                         irsp->un.ulpWord[4],
2607                                         irsp->un.ulpWord[5],
2608                                         *(((uint32_t *) irsp) + 6),
2609                                         *(((uint32_t *) irsp) + 7),
2610                                         *(((uint32_t *) irsp) + 8),
2611                                         *(((uint32_t *) irsp) + 9),
2612                                         *(((uint32_t *) irsp) + 10),
2613                                         *(((uint32_t *) irsp) + 11),
2614                                         *(((uint32_t *) irsp) + 12),
2615                                         *(((uint32_t *) irsp) + 13),
2616                                         *(((uint32_t *) irsp) + 14),
2617                                         *(((uint32_t *) irsp) + 15));
2618                 }
2619
2620                 /*
2621                  * Fetch the IOCB command type and call the correct completion
2622                  * routine. Solicited and Unsolicited IOCBs on the ELS ring
2623                  * get freed back to the lpfc_iocb_list by the discovery
2624                  * kernel thread.
2625                  */
2626                 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2627                 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
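                /*
                 * The host lock is released across each upcall below
                 * because the completion handlers may themselves take
                 * hbalock or issue new iocbs.
                 */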
2628                 switch (type) {
2629                 case LPFC_SOL_IOCB:
2630                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2631                         rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2632                         spin_lock_irqsave(&phba->hbalock, iflag);
2633                         break;
2634
2635                 case LPFC_UNSOL_IOCB:
2636                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2637                         rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2638                         spin_lock_irqsave(&phba->hbalock, iflag);
2639                         if (!rc)
2640                                 free_saveq = 0;
2641                         break;
2642
2643                 case LPFC_ABORT_IOCB:
2644                         cmdiocbp = NULL;
2645                         if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2646                                 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2647                                                                  saveq);
2648                         if (cmdiocbp) {
2649                                 /* Call the specified completion routine */
2650                                 if (cmdiocbp->iocb_cmpl) {
2651                                         spin_unlock_irqrestore(&phba->hbalock,
2652                                                                iflag);
2653                                         (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2654                                                               saveq);
2655                                         spin_lock_irqsave(&phba->hbalock,
2656                                                           iflag);
2657                                 } else
2658                                         __lpfc_sli_release_iocbq(phba,
2659                                                                  cmdiocbp);
2660                         }
2661                         break;
2662
2663                 case LPFC_UNKNOWN_IOCB:
2664                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2665                                 char adaptermsg[LPFC_MAX_ADPTMSG];
2666                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2667                                 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2668                                        MAX_MSG_DATA);
2669                                 dev_warn(&((phba->pcidev)->dev),
2670                                          "lpfc%d: %s\n",
2671                                          phba->brd_no, adaptermsg);
2672                         } else {
2673                                 /* Unknown IOCB command */
2674                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2675                                                 "0335 Unknown IOCB "
2676                                                 "command Data: x%x "
2677                                                 "x%x x%x x%x\n",
2678                                                 irsp->ulpCommand,
2679                                                 irsp->ulpStatus,
2680                                                 irsp->ulpIoTag,
2681                                                 irsp->ulpContext);
2682                         }
2683                         break;
2684                 }
2685
2686                 if (free_saveq) {
2687                         list_for_each_entry_safe(rspiocbp, next_iocb,
2688                                                  &saveq->list, list) {
2689                                 list_del(&rspiocbp->list);
2690                                 __lpfc_sli_release_iocbq(phba, rspiocbp);
2691                         }
2692                         __lpfc_sli_release_iocbq(phba, saveq);
2693                 }
2694                 rspiocbp = NULL;
2695         }
2696         spin_unlock_irqrestore(&phba->hbalock, iflag);
2697         return rspiocbp;
2698 }
2699
2700 /**
2701  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
2702  * @phba: Pointer to HBA context object.
2703  * @pring: Pointer to driver SLI ring object.
2704  * @mask: Host attention register mask for this ring.
2705  *
2706  * This routine wraps the actual slow_ring event process routine, invoked
2707  * through the API jump table function pointer in the lpfc_hba struct.
2708  **/
2709 void
2710 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2711                                 struct lpfc_sli_ring *pring, uint32_t mask)
2712 {
2713         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2714 }
2715
2716 /**
2717  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2718  * @phba: Pointer to HBA context object.
2719  * @pring: Pointer to driver SLI ring object.
2720  * @mask: Host attention register mask for this ring.
2721  *
2722  * This function is called from the worker thread when there is a ring event
2723  * for non-fcp rings. The caller does not hold any lock. The function will
2724  * remove each response iocb in the response ring and calls the handle
2725  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2726  **/
2727 static void
2728 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2729                                    struct lpfc_sli_ring *pring, uint32_t mask)
2730 {
2731         struct lpfc_pgp *pgp;
2732         IOCB_t *entry;
2733         IOCB_t *irsp = NULL;
2734         struct lpfc_iocbq *rspiocbp = NULL;
2735         uint32_t portRspPut, portRspMax;
2736         unsigned long iflag;
2737         uint32_t status;
2738
2739         pgp = &phba->port_gp[pring->ringno];
2740         spin_lock_irqsave(&phba->hbalock, iflag);
2741         pring->stats.iocb_event++;
2742
2743         /*
2744          * The next available response entry should never exceed the maximum
2745          * entries.  If it does, treat it as an adapter hardware error.
2746          */
2747         portRspMax = pring->numRiocb;
2748         portRspPut = le32_to_cpu(pgp->rspPutInx);
2749         if (portRspPut >= portRspMax) {
2750                 /*
2751                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2752                  * rsp ring <portRspMax>
2753                  */
2754                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2755                                 "0303 Ring %d handler: portRspPut %d "
2756                                 "is bigger than rsp ring %d\n",
2757                                 pring->ringno, portRspPut, portRspMax);
2758
2759                 phba->link_state = LPFC_HBA_ERROR;
2760                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2761
2762                 phba->work_hs = HS_FFER3;
2763                 lpfc_handle_eratt(phba);
2764
2765                 return;
2766         }
2767
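        /*
         * Make sure the read of the port's rspPutInx above completes
         * before any reads of the response ring entries that follow.
         */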
2768         rmb();
2769         while (pring->rspidx != portRspPut) {
2770                 /*
2771                  * Build a completion list and call the appropriate handler.
2772                  * The process is to get the next available response iocb, get
2773                  * a free iocb from the list, copy the response data into the
2774                  * free iocb, insert to the continuation list, and update the
2775                  * next response index to slim.  This process makes response
2776                  * iocb's in the ring available to DMA as fast as possible but
2777                  * pays a penalty for a copy operation.  Since the iocb is
2778                  * only 32 bytes, this penalty is considered small relative to
2779                  * the PCI reads for register values and a slim write.  When
2780                  * the ulpLe field is set, the entire Command has been
2781                  * received.
2782                  */
2783                 entry = lpfc_resp_iocb(phba, pring);
2784
2785                 phba->last_completion_time = jiffies;
2786                 rspiocbp = __lpfc_sli_get_iocbq(phba);
2787                 if (rspiocbp == NULL) {
2788                         printk(KERN_ERR "%s: out of buffers! Failing "
2789                                "completion.\n", __func__);
2790                         break;
2791                 }
2792
2793                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
2794                                       phba->iocb_rsp_size);
2795                 irsp = &rspiocbp->iocb;
2796
2797                 if (++pring->rspidx >= portRspMax)
2798                         pring->rspidx = 0;
2799
2800                 if (pring->ringno == LPFC_ELS_RING) {
2801                         lpfc_debugfs_slow_ring_trc(phba,
2802                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
2803                                 *(((uint32_t *) irsp) + 4),
2804                                 *(((uint32_t *) irsp) + 6),
2805                                 *(((uint32_t *) irsp) + 7));
2806                 }
2807
2808                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2809
2810                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2811                 /* Handle the response IOCB */
2812                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2813                 spin_lock_irqsave(&phba->hbalock, iflag);
2814
2815                 /*
2816                  * If the port response put pointer has not been updated, sync
2817                  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
2818                  * response put pointer.
2819                  */
2820                 if (pring->rspidx == portRspPut) {
2821                         portRspPut = le32_to_cpu(pgp->rspPutInx);
2822                 }
2823         } /* while (pring->rspidx != portRspPut) */
2824
2825         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
2826                 /* At least one response entry has been freed */
2827                 pring->stats.iocb_rsp_full++;
2828                 /* SET RxRE_RSP in Chip Att register */
2829                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2830                 writel(status, phba->CAregaddr);
2831                 readl(phba->CAregaddr); /* flush */
2832         }
2833         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2834                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2835                 pring->stats.iocb_cmd_empty++;
2836
2837                 /* Force update of the local copy of cmdGetInx */
2838                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2839                 lpfc_sli_resume_iocb(phba, pring);
2840
2841                 if ((pring->lpfc_sli_cmd_available))
2842                         (pring->lpfc_sli_cmd_available) (phba, pring);
2843
2844         }
2845
2846         spin_unlock_irqrestore(&phba->hbalock, iflag);
2847         return;
2848 }
2849
2850 /**
2851  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path ELS events
2852  * @phba: Pointer to HBA context object.
2853  * @pring: Pointer to driver SLI ring object.
2854  * @mask: Host attention register mask for this ring.
2855  *
2856  * This function is called from the worker thread when there is a pending
2857  * ELS response iocb on the driver internal slow-path response iocb worker
2858  * queue. The caller does not hold any lock. The function will remove each
2859  * response iocb from the response worker queue and calls the handle
2860  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2861  **/
2862 static void
2863 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
2864                                    struct lpfc_sli_ring *pring, uint32_t mask)
2865 {
2866         struct lpfc_iocbq *irspiocbq;
2867         struct hbq_dmabuf *dmabuf;
2868         struct lpfc_cq_event *cq_event;
2869         unsigned long iflag;
2870
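        /*
         * Clear the queue-event flag before draining the list; the
         * interrupt handler sets it again when it posts a new event.
         */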
2871         spin_lock_irqsave(&phba->hbalock, iflag);
2872         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
2873         spin_unlock_irqrestore(&phba->hbalock, iflag);
2874         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
2875                 /* Get the response iocb from the head of work queue */
2876                 spin_lock_irqsave(&phba->hbalock, iflag);
2877                 list_remove_head(&phba->sli4_hba.sp_queue_event,
2878                                  cq_event, struct lpfc_cq_event, list);
2879                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2880
2881                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
2882                 case CQE_CODE_COMPL_WQE:
2883                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
2884                                                  cq_event);
2885                         /* Translate ELS WCQE to response IOCBQ */
2886                         irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
2887                                                                    irspiocbq);
2888                         if (irspiocbq)
2889                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
2890                                                            irspiocbq);
2891                         break;
2892                 case CQE_CODE_RECEIVE:
2893                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
2894                                               cq_event);
2895                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
2896                         break;
2897                 default:
2898                         break;
2899                 }
2900         }
2901 }
2902
2903 /**
2904  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
2905  * @phba: Pointer to HBA context object.
2906  * @pring: Pointer to driver SLI ring object.
2907  *
2908  * This function aborts all iocbs in the given ring and frees all the iocb
2909  * objects in txq. This function issues an abort iocb for all the iocb commands
2910  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
2911  * the return of this function. The caller is not required to hold any locks.
2912  **/
2913 void
2914 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2915 {
2916         LIST_HEAD(completions);
2917         struct lpfc_iocbq *iocb, *next_iocb;
2918
2919         if (pring->ringno == LPFC_ELS_RING) {
2920                 lpfc_fabric_abort_hba(phba);
2921         }
2922
2923         /* Error everything on txq and txcmplq
2924          * First do the txq.
2925          */
2926         spin_lock_irq(&phba->hbalock);
2927         list_splice_init(&pring->txq, &completions);
2928         pring->txq_cnt = 0;
2929
2930         /* Next issue ABTS for everything on the txcmplq */
2931         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
2932                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
2933
2934         spin_unlock_irq(&phba->hbalock);
2935
2936         /* Cancel all the IOCBs from the completions list */
2937         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
2938                               IOERR_SLI_ABORTED);
2939 }
2940
2941 /**
2942  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
2943  * @phba: Pointer to HBA context object.
2944  *
2945  * This function flushes all iocbs in the fcp ring and frees all the iocb
2946  * objects in txq and txcmplq. This function will not issue abort iocbs
2947  * for the iocb commands in txcmplq; they will just be returned with
2948  * IOERR_SLI_DOWN. This function is invoked by the EEH handler when the
2949  * device's PCI slot has been permanently disabled.
2950  **/
2951 void
2952 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2953 {
2954         LIST_HEAD(txq);
2955         LIST_HEAD(txcmplq);
2956         struct lpfc_sli *psli = &phba->sli;
2957         struct lpfc_sli_ring  *pring;
2958
2959         /* Currently, only one fcp ring */
2960         pring = &psli->ring[psli->fcp_ring];
2961
2962         spin_lock_irq(&phba->hbalock);
2963         /* Retrieve everything on txq */
2964         list_splice_init(&pring->txq, &txq);
2965         pring->txq_cnt = 0;
2966
2967         /* Retrieve everything on the txcmplq */
2968         list_splice_init(&pring->txcmplq, &txcmplq);
2969         pring->txcmplq_cnt = 0;
2970         spin_unlock_irq(&phba->hbalock);
2971
2972         /* Flush the txq */
2973         lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
2974                               IOERR_SLI_DOWN);
2975
2976         /* Flush the txcmpq */
2977         lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
2978                               IOERR_SLI_DOWN);
2979 }
2980
2981 /**
2982  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
2983  * @phba: Pointer to HBA context object.
2984  * @mask: Bit mask to be checked.
2985  *
2986  * This function reads the host status register and compares
2987  * with the provided bit mask to check if HBA completed
2988  * the restart. This function will wait in a loop for the
2989  * HBA to complete restart. If the HBA does not restart within
2990  * 15 iterations, the function will reset the HBA again. The
2991  * function returns 1 when HBA fail to restart otherwise returns
2992  * zero.
2993  **/
2994 static int
2995 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
2996 {
2997         uint32_t status;
2998         int i = 0;
2999         int retval = 0;
3000
3001         /* Read the HBA Host Status Register */
3002         status = readl(phba->HSregaddr);
3003
3004         /*
3005          * Check the status register every 10ms for 5 retries, then every
3006          * 500ms for 5, then every 2.5 sec for 5, then reset the board and
3007          * check every 2.5 sec for 4 more.
3008          * Break out of the loop if errors occurred during init.
3009          */
3010         while (((status & mask) != mask) &&
3011                !(status & HS_FFERM) &&
3012                i++ < 20) {
3013
3014                 if (i <= 5)
3015                         msleep(10);
3016                 else if (i <= 10)
3017                         msleep(500);
3018                 else
3019                         msleep(2500);
3020
3021                 if (i == 15) {
3022                         /* Do post */
3023                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3024                         lpfc_sli_brdrestart(phba);
3025                 }
3026                 /* Read the HBA Host Status Register */
3027                 status = readl(phba->HSregaddr);
3028         }
3029
3030         /* Check to see if any errors occurred during init */
3031         if ((status & HS_FFERM) || (i >= 20)) {
3032                 phba->link_state = LPFC_HBA_ERROR;
3033                 retval = 1;
3034         }
3035
3036         return retval;
3037 }
3038
3039 /**
3040  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3041  * @phba: Pointer to HBA context object.
3042  * @mask: Bit mask to be checked.
3043  *
3044  * This function checks the host status register to determine if the
3045  * HBA is ready. If the HBA is not ready, the function will reset
3046  * the HBA PCI function and check the status again. The function
3047  * returns 1 when the HBA fails to become ready, otherwise it
3048  * returns zero.
3049  **/
3050 static int
3051 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3052 {
3053         uint32_t status;
3054         int retval = 0;
3055
3056         /* Read the HBA Host Status Register */
3057         status = lpfc_sli4_post_status_check(phba);
3058
3059         if (status) {
3060                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3061                 lpfc_sli_brdrestart(phba);
3062                 status = lpfc_sli4_post_status_check(phba);
3063         }
3064
3065         /* Check to see if any errors occurred during init */
3066         if (status) {
3067                 phba->link_state = LPFC_HBA_ERROR;
3068                 retval = 1;
3069         } else
3070                 phba->sli4_hba.intr_enable = 0;
3071
3072         return retval;
3073 }
3074
3075 /**
3076  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3077  * @phba: Pointer to HBA context object.
3078  * @mask: Bit mask to be checked.
3079  *
3080  * This routine wraps the actual SLI3 or SLI4 hba readiness check
3081  * routine, invoked through the API jump table pointer in the lpfc_hba struct.
3082  **/
3083 int
3084 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3085 {
3086         return phba->lpfc_sli_brdready(phba, mask);
3087 }
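
/*
 * For reference, a sketch of how this wrapper (and its peers) is bound:
 * the per-SLI-rev handlers are assigned once at attach time, based on
 * the PCI device type, by the driver's API table setup code
 * (e.g. lpfc_sli_api_table_setup):
 *
 *	phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;	(SLI-3 HBAs)
 *	phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;	(SLI-4/FCoE HBAs)
 */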
3088
3089 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3090
3091 /**
3092  * lpfc_reset_barrier - Make HBA ready for HBA reset
3093  * @phba: Pointer to HBA context object.
3094  *
3095  * This function is called before resetting an HBA. This function
3096  * requests the HBA to quiesce its DMA activity before the reset.
3097  **/
3098 void lpfc_reset_barrier(struct lpfc_hba *phba)
3099 {
3100         uint32_t __iomem *resp_buf;
3101         uint32_t __iomem *mbox_buf;
3102         volatile uint32_t mbox;
3103         uint32_t hc_copy;
3104         int  i;
3105         uint8_t hdrtype;
3106
3107         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3108         if (hdrtype != 0x80 ||
3109             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3110              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3111                 return;
3112
3113         /*
3114          * Tell the other part of the chip to suspend temporarily all
3115          * its DMA activity.
3116          */
3117         resp_buf = phba->MBslimaddr;
3118
3119         /* Disable the error attention */
3120         hc_copy = readl(phba->HCregaddr);
3121         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3122         readl(phba->HCregaddr); /* flush */
3123         phba->link_flag |= LS_IGNORE_ERATT;
3124
3125         if (readl(phba->HAregaddr) & HA_ERATT) {
3126                 /* Clear Chip error bit */
3127                 writel(HA_ERATT, phba->HAregaddr);
3128                 phba->pport->stopped = 1;
3129         }
3130
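        /*
         * Build a KILL_BOARD mailbox word, marked as chip-owned, to ask
         * the port to stop its DMA activity.
         */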
3131         mbox = 0;
3132         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3133         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3134
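        /*
         * Seed the response word with a test pattern; the chip
         * acknowledges the kill by writing back its complement.
         */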
3135         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
3136         mbox_buf = phba->MBslimaddr;
3137         writel(mbox, mbox_buf);
3138
3139         for (i = 0;
3140              readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
3141                 mdelay(1);
3142
3143         if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
3144                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3145                     phba->pport->stopped)
3146                         goto restore_hc;
3147                 else
3148                         goto clear_errat;
3149         }
3150
3151         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
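        /* Wait up to 500ms for the chip to hand the mailbox word back */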
3152         for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
3153                 mdelay(1);
3154
3155 clear_errat:
3156
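        /* Note: i is not reset here; it carries over from the loops above */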
3157         while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
3158                 mdelay(1);
3159
3160         if (readl(phba->HAregaddr) & HA_ERATT) {
3161                 writel(HA_ERATT, phba->HAregaddr);
3162                 phba->pport->stopped = 1;
3163         }
3164
3165 restore_hc:
3166         phba->link_flag &= ~LS_IGNORE_ERATT;
3167         writel(hc_copy, phba->HCregaddr);
3168         readl(phba->HCregaddr); /* flush */
3169 }
3170
3171 /**
3172  * lpfc_sli_brdkill - Issue a kill_board mailbox command
3173  * @phba: Pointer to HBA context object.
3174  *
3175  * This function issues a kill_board mailbox command and waits for
3176  * the error attention interrupt. This function is called for stopping
3177  * the firmware processing. The caller is not required to hold any
3178  * locks. This function calls lpfc_hba_down_post function to free
3179  * any pending commands after the kill. The function will return 1 when it
3180  * fails to kill the board, else it will return 0.
3181  **/
3182 int
3183 lpfc_sli_brdkill(struct lpfc_hba *phba)
3184 {
3185         struct lpfc_sli *psli;
3186         LPFC_MBOXQ_t *pmb;
3187         uint32_t status;
3188         uint32_t ha_copy;
3189         int retval;
3190         int i = 0;
3191
3192         psli = &phba->sli;
3193
3194         /* Kill HBA */
3195         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3196                         "0329 Kill HBA Data: x%x x%x\n",
3197                         phba->pport->port_state, psli->sli_flag);
3198
3199         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3200         if (!pmb)
3201                 return 1;
3202
3203         /* Disable the error attention */
3204         spin_lock_irq(&phba->hbalock);
3205         status = readl(phba->HCregaddr);
3206         status &= ~HC_ERINT_ENA;
3207         writel(status, phba->HCregaddr);
3208         readl(phba->HCregaddr); /* flush */
3209         phba->link_flag |= LS_IGNORE_ERATT;
3210         spin_unlock_irq(&phba->hbalock);
3211
3212         lpfc_kill_board(phba, pmb);
3213         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3214         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3215
3216         if (retval != MBX_SUCCESS) {
3217                 if (retval != MBX_BUSY)
3218                         mempool_free(pmb, phba->mbox_mem_pool);
3219                 spin_lock_irq(&phba->hbalock);
3220                 phba->link_flag &= ~LS_IGNORE_ERATT;
3221                 spin_unlock_irq(&phba->hbalock);
3222                 return 1;
3223         }
3224
3225         spin_lock_irq(&phba->hbalock);
3226         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3227         spin_unlock_irq(&phba->hbalock);
3228
3229         mempool_free(pmb, phba->mbox_mem_pool);
3230
3231         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3232          * attention every 100ms for 3 seconds. If we don't get ERATT after
3233          * 3 seconds we still set HBA_ERROR state because the status of the
3234          * board is now undefined.
3235          */
3236         ha_copy = readl(phba->HAregaddr);
3237
3238         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3239                 mdelay(100);
3240                 ha_copy = readl(phba->HAregaddr);
3241         }
3242
3243         del_timer_sync(&psli->mbox_tmo);
3244         if (ha_copy & HA_ERATT) {
3245                 writel(HA_ERATT, phba->HAregaddr);
3246                 phba->pport->stopped = 1;
3247         }
3248         spin_lock_irq(&phba->hbalock);
3249         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3250         psli->mbox_active = NULL;
3251         phba->link_flag &= ~LS_IGNORE_ERATT;
3252         spin_unlock_irq(&phba->hbalock);
3253
3254         lpfc_hba_down_post(phba);
3255         phba->link_state = LPFC_HBA_ERROR;
3256
3257         return ha_copy & HA_ERATT ? 0 : 1;
3258 }
3259
3260 /**
3261  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
3262  * @phba: Pointer to HBA context object.
3263  *
3264  * This function resets the HBA by writing HC_INITFF to the control
3265  * register. After the HBA resets, this function resets all the iocb ring
3266  * indices. This function disables PCI layer parity checking during
3267  * the reset.
3268  * This function returns 0 always.
3269  * The caller is not required to hold any locks.
3270  **/
3271 int
3272 lpfc_sli_brdreset(struct lpfc_hba *phba)
3273 {
3274         struct lpfc_sli *psli;
3275         struct lpfc_sli_ring *pring;
3276         uint16_t cfg_value;
3277         int i;
3278
3279         psli = &phba->sli;
3280
3281         /* Reset HBA */
3282         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3283                         "0325 Reset HBA Data: x%x x%x\n",
3284                         phba->pport->port_state, psli->sli_flag);
3285
3286         /* perform board reset */
3287         phba->fc_eventTag = 0;
3288         phba->link_events = 0;
3289         phba->pport->fc_myDID = 0;
3290         phba->pport->fc_prevDID = 0;
3291
3292         /* Turn off parity checking and serr during the physical reset */
3293         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3294         pci_write_config_word(phba->pcidev, PCI_COMMAND,
3295                               (cfg_value &
3296                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3297
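        /* Mark the SLI layer inactive and suspend link-attention processing */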
3298         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3299
3300         /* Now toggle INITFF bit in the Host Control Register */
3301         writel(HC_INITFF, phba->HCregaddr);
3302         mdelay(1);
3303         readl(phba->HCregaddr); /* flush */
3304         writel(0, phba->HCregaddr);
3305         readl(phba->HCregaddr); /* flush */
3306
3307         /* Restore PCI cmd register */
3308         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3309
3310         /* Initialize relevant SLI info */
3311         for (i = 0; i < psli->num_rings; i++) {
3312                 pring = &psli->ring[i];
3313                 pring->flag = 0;
3314                 pring->rspidx = 0;
3315                 pring->next_cmdidx  = 0;
3316                 pring->local_getidx = 0;
3317                 pring->cmdidx = 0;
3318                 pring->missbufcnt = 0;
3319         }
3320
3321         phba->link_state = LPFC_WARM_START;
3322         return 0;
3323 }
3324
3325 /**
3326  * lpfc_sli4_brdreset - Reset a sli-4 HBA
3327  * @phba: Pointer to HBA context object.
3328  *
3329  * This function resets a SLI4 HBA. This function disables PCI layer parity
3330  * checking while it resets the device. The caller is not required to hold
3331  * any locks.
3332  *
3333  * This function returns 0 always.
3334  **/
3335 int
3336 lpfc_sli4_brdreset(struct lpfc_hba *phba)
3337 {
3338         struct lpfc_sli *psli = &phba->sli;
3339         uint16_t cfg_value;
3340         uint8_t qindx;
3341
3342         /* Reset HBA */
3343         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3344                         "0295 Reset HBA Data: x%x x%x\n",
3345                         phba->pport->port_state, psli->sli_flag);
3346
3347         /* perform board reset */
3348         phba->fc_eventTag = 0;
3349         phba->link_events = 0;
3350         phba->pport->fc_myDID = 0;
3351         phba->pport->fc_prevDID = 0;
3352
3353         /* Turn off parity checking and serr during the physical reset */
3354         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3355         pci_write_config_word(phba->pcidev, PCI_COMMAND,
3356                               (cfg_value &
3357                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3358
3359         spin_lock_irq(&phba->hbalock);
3360         psli->sli_flag &= ~(LPFC_PROCESS_LA);
3361         phba->fcf.fcf_flag = 0;
3362         /* Clean up the child queue list for the CQs */
3363         list_del_init(&phba->sli4_hba.mbx_wq->list);
3364         list_del_init(&phba->sli4_hba.els_wq->list);
3365         list_del_init(&phba->sli4_hba.hdr_rq->list);
3366         list_del_init(&phba->sli4_hba.dat_rq->list);
3367         list_del_init(&phba->sli4_hba.mbx_cq->list);
3368         list_del_init(&phba->sli4_hba.els_cq->list);
3369         for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3370                 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3371         for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3372                 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3373         spin_unlock_irq(&phba->hbalock);
3374
3375         /* Now physically reset the device */
3376         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3377                         "0389 Performing PCI function reset!\n");
3378         /* Perform FCoE PCI function reset */
3379         lpfc_pci_function_reset(phba);
3380
3381         return 0;
3382 }
3383
3384 /**
3385  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
3386  * @phba: Pointer to HBA context object.
3387  *
3388  * This function is called in the SLI initialization code path to
3389  * restart the HBA. The caller is not required to hold any lock.
3390  * This function writes MBX_RESTART mailbox command to the SLIM and
3391  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
3392  * function to free any pending commands. The function enables
3393  * POST only during the first initialization. The function returns zero.
3394  * The function does not guarantee completion of the MBX_RESTART mailbox
3395  * command before it returns.
3396  **/
3397 static int
3398 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3399 {
3400         MAILBOX_t *mb;
3401         struct lpfc_sli *psli;
3402         volatile uint32_t word0;
3403         void __iomem *to_slim;
3404         uint32_t hba_aer_enabled;
3405
3406         spin_lock_irq(&phba->hbalock);
3407
3408         /* Save the PCIe device Advanced Error Reporting (AER) state */
3409         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3410
3411         psli = &phba->sli;
3412
3413         /* Restart HBA */
3414         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3415                         "0337 Restart HBA Data: x%x x%x\n",
3416                         phba->pport->port_state, psli->sli_flag);
3417
3418         word0 = 0;
3419         mb = (MAILBOX_t *) &word0;
3420         mb->mbxCommand = MBX_RESTART;
3421         mb->mbxHc = 1;
3422
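        /* Quiesce chip DMA before posting the RESTART mailbox word */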
3423         lpfc_reset_barrier(phba);
3424
3425         to_slim = phba->MBslimaddr;
3426         writel(*(uint32_t *) mb, to_slim);
3427         readl(to_slim); /* flush */
3428
3429         /* Only skip post after fc_ffinit is completed */
3430         if (phba->pport->port_state)
3431                 word0 = 1;      /* This is really setting up word1 */
3432         else
3433                 word0 = 0;      /* This is really setting up word1 */
3434         to_slim = phba->MBslimaddr + sizeof (uint32_t);
3435         writel(*(uint32_t *) mb, to_slim);
3436         readl(to_slim); /* flush */
3437
3438         lpfc_sli_brdreset(phba);
3439         phba->pport->stopped = 0;
3440         phba->link_state = LPFC_INIT_START;
3441         phba->hba_flag = 0;
3442         spin_unlock_irq(&phba->hbalock);
3443
3444         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3445         psli->stats_start = get_seconds();
3446
3447         /* Give the INITFF and Post time to settle. */
3448         mdelay(100);
3449
3450         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
3451         if (hba_aer_enabled)
3452                 pci_disable_pcie_error_reporting(phba->pcidev);
3453
3454         lpfc_hba_down_post(phba);
3455
3456         return 0;
3457 }
3458
3459 /**
3460  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3461  * @phba: Pointer to HBA context object.
3462  *
3463  * This function is called in the SLI initialization code path to restart
3464  * a SLI4 HBA. The caller is not required to hold any lock.
3465  * At the end of the function, it calls lpfc_hba_down_post function to
3466  * free any pending commands.
3467  **/
3468 static int
3469 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3470 {
3471         struct lpfc_sli *psli = &phba->sli;
3472
3474         /* Restart HBA */
3475         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3476                         "0296 Restart HBA Data: x%x x%x\n",
3477                         phba->pport->port_state, psli->sli_flag);
3478
3479         lpfc_sli4_brdreset(phba);
3480
3481         spin_lock_irq(&phba->hbalock);
3482         phba->pport->stopped = 0;
3483         phba->link_state = LPFC_INIT_START;
3484         phba->hba_flag = 0;
3485         spin_unlock_irq(&phba->hbalock);
3486
3487         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3488         psli->stats_start = get_seconds();
3489
3490         lpfc_hba_down_post(phba);
3491
3492         return 0;
3493 }
3494
3495 /**
3496  * lpfc_sli_brdrestart - Wrapper func for restarting hba
3497  * @phba: Pointer to HBA context object.
3498  *
3499  * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked
3500  * through the API jump table function pointer in the lpfc_hba struct.
3501  **/
3502 int
3503 lpfc_sli_brdrestart(struct lpfc_hba *phba)
3504 {
3505         return phba->lpfc_sli_brdrestart(phba);
3506 }
3507
3508 /**
3509  * lpfc_sli_chipset_init - Wait for the HBA to become ready after a restart
3510  * @phba: Pointer to HBA context object.
3511  *
3512  * This function is called after a HBA restart to wait for successful
3513  * restart of the HBA. Successful restart of the HBA is indicated by
3514  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
3515  * iterations, the function will restart the HBA again. The function returns
3516  * zero if the HBA successfully restarts, else a negative error code.
3517  **/
3518 static int
3519 lpfc_sli_chipset_init(struct lpfc_hba *phba)
3520 {
3521         uint32_t status, i = 0;
3522
3523         /* Read the HBA Host Status Register */
3524         status = readl(phba->HSregaddr);
3525
3526         /* Check status register to see what current state is */
3527         i = 0;
3528         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
3529
3530                 /* Check every 10ms for 5 retries, then every 500ms for 5,
3531                  * then every 2.5 sec for 5, then reset the board and check
3532                  * every 2.5 sec for 4 more.
3533                  */
3534                 if (i++ >= 20) {
3535                         /* Adapter failed to init, timeout, status reg
3536                            <status> */
3537                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3538                                         "0436 Adapter failed to init, "
3539                                         "timeout, status reg x%x, "
3540                                         "FW Data: A8 x%x AC x%x\n", status,
3541                                         readl(phba->MBslimaddr + 0xa8),
3542                                         readl(phba->MBslimaddr + 0xac));
3543                         phba->link_state = LPFC_HBA_ERROR;
3544                         return -ETIMEDOUT;
3545                 }
3546
3547                 /* Check to see if any errors occurred during init */
3548                 if (status & HS_FFERM) {
3549                         /* ERROR: During chipset initialization */
3550                         /* Adapter failed to init, chipset, status reg
3551                            <status> */
3552                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3553                                         "0437 Adapter failed to init, "
3554                                         "chipset, status reg x%x, "
3555                                         "FW Data: A8 x%x AC x%x\n", status,
3556                                         readl(phba->MBslimaddr + 0xa8),
3557                                         readl(phba->MBslimaddr + 0xac));
3558                         phba->link_state = LPFC_HBA_ERROR;
3559                         return -EIO;
3560                 }
3561
3562                 if (i <= 5) {
3563                         msleep(10);
3564                 } else if (i <= 10) {
3565                         msleep(500);
3566                 } else {
3567                         msleep(2500);
3568                 }
3569
3570                 if (i == 15) {
3571                         /* Do post */
3572                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3573                         lpfc_sli_brdrestart(phba);
3574                 }
3575                 /* Read the HBA Host Status Register */
3576                 status = readl(phba->HSregaddr);
3577         }
3578
3579         /* Check to see if any errors occurred during init */
3580         if (status & HS_FFERM) {
3581                 /* ERROR: During chipset initialization */
3582                 /* Adapter failed to init, chipset, status reg <status> */
3583                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3584                                 "0438 Adapter failed to init, chipset, "
3585                                 "status reg x%x, "
3586                                 "FW Data: A8 x%x AC x%x\n", status,
3587                                 readl(phba->MBslimaddr + 0xa8),
3588                                 readl(phba->MBslimaddr + 0xac));
3589                 phba->link_state = LPFC_HBA_ERROR;
3590                 return -EIO;
3591         }
3592
3593         /* Clear all interrupt enable conditions */
3594         writel(0, phba->HCregaddr);
3595         readl(phba->HCregaddr); /* flush */
3596
3597         /* setup host attn register */
3598         writel(0xffffffff, phba->HAregaddr);
3599         readl(phba->HAregaddr); /* flush */
3600         return 0;
3601 }
3602
3603 /**
3604  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
3605  *
3606  * This function calculates and returns the number of HBQs required to be
3607  * configured.
3608  **/
3609 int
3610 lpfc_sli_hbq_count(void)
3611 {
3612         return ARRAY_SIZE(lpfc_hbq_defs);
3613 }
3614
3615 /**
3616  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
3617  *
3618  * This function adds the number of hbq entries in every HBQ to get
3619  * the total number of hbq entries required for the HBA and returns
3620  * the total count.
3621  **/
3622 static int
3623 lpfc_sli_hbq_entry_count(void)
3624 {
3625         int  hbq_count = lpfc_sli_hbq_count();
3626         int  count = 0;
3627         int  i;
3628
3629         for (i = 0; i < hbq_count; ++i)
3630                 count += lpfc_hbq_defs[i]->entry_count;
3631         return count;
3632 }
3633
3634 /**
3635  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
3636  *
3637  * This function calculates amount of memory required for all hbq entries
3638  * to be configured and returns the total memory required.
3639  **/
3640 int
3641 lpfc_sli_hbq_size(void)
3642 {
3643         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
3644 }
3645
3646 /**
3647  * lpfc_sli_hbq_setup - configure and initialize HBQs
3648  * @phba: Pointer to HBA context object.
3649  *
3650  * This function is called during the SLI initialization to configure
3651  * all the HBQs and post buffers to the HBQ. The caller is not
3652  * required to hold any locks. This function will return zero if successful
3653  * else it will return negative error code.
3654  **/
3655 static int
3656 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
3657 {
3658         int  hbq_count = lpfc_sli_hbq_count();
3659         LPFC_MBOXQ_t *pmb;
3660         MAILBOX_t *pmbox;
3661         uint32_t hbqno;
3662         uint32_t hbq_entry_index;
3663
3664         /* Get a Mailbox buffer to setup mailbox
3665          * commands for HBA initialization
3666          */
3667         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3668
3669         if (!pmb)
3670                 return -ENOMEM;
3671
3672         pmbox = &pmb->u.mb;
3673
3674         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
3675         phba->link_state = LPFC_INIT_MBX_CMDS;
3676         phba->hbq_in_use = 1;
3677
3678         hbq_entry_index = 0;
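        /* Configure each HBQ, giving it a contiguous slice of the entries */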
3679         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
3680                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
3681                 phba->hbqs[hbqno].hbqPutIdx      = 0;
3682                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
3683                 phba->hbqs[hbqno].entry_count =
3684                         lpfc_hbq_defs[hbqno]->entry_count;
3685                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
3686                         hbq_entry_index, pmb);
3687                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
3688
3689                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
3690                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
3691                            mbxStatus <status>, ring <num> */
3692
3693                         lpfc_printf_log(phba, KERN_ERR,
3694                                         LOG_SLI | LOG_VPORT,
3695                                         "1805 Adapter failed to init. "
3696                                         "Data: x%x x%x x%x\n",
3697                                         pmbox->mbxCommand,
3698                                         pmbox->mbxStatus, hbqno);
3699
3700                         phba->link_state = LPFC_HBA_ERROR;
3701                         mempool_free(pmb, phba->mbox_mem_pool);
3702                         return -ENXIO;
3703                 }
3704         }
3705         phba->hbq_count = hbq_count;
3706
3707         mempool_free(pmb, phba->mbox_mem_pool);
3708
3709         /* Initially populate or replenish the HBQs */
3710         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
3711                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
3712         return 0;
3713 }
3714
3715 /**
3716  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
3717  * @phba: Pointer to HBA context object.
3718  *
3719  * This function is called during the SLI initialization to configure
3720  * the receive buffer queue and post buffers to it. The caller is not
3721  * required to hold any locks. This function will return zero if successful
3722  * else it will return negative error code.
3723  **/
3724 static int
3725 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3726 {
3727         phba->hbq_in_use = 1;
3728         phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3729         phba->hbq_count = 1;
3730         /* Initially populate or replenish the HBQs */
3731         lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3732         return 0;
3733 }
3734
3735 /**
3736  * lpfc_sli_config_port - Issue config port mailbox command
3737  * @phba: Pointer to HBA context object.
3738  * @sli_mode: sli mode - 2/3
3739  *
3740  * This function is called by the sli initialization code path
3741  * to issue config_port mailbox command. This function restarts the
3742  * HBA firmware and issues a config_port mailbox command to configure
3743  * the SLI interface in the sli mode specified by sli_mode
3744  * variable. The caller is not required to hold any locks.
3745  * The function returns 0 if successful, else returns negative error
3746  * code.
3747  **/
3748 int
3749 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3750 {
3751         LPFC_MBOXQ_t *pmb;
3752         uint32_t resetcount = 0, rc = 0, done = 0;
3753
3754         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3755         if (!pmb) {
3756                 phba->link_state = LPFC_HBA_ERROR;
3757                 return -ENOMEM;
3758         }
3759
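        /*
         * Record the requested SLI mode; the CONFIG_PORT response below
         * reports the mode the firmware actually granted.
         */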
3760         phba->sli_rev = sli_mode;
3761         while (resetcount < 2 && !done) {
3762                 spin_lock_irq(&phba->hbalock);
3763                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
3764                 spin_unlock_irq(&phba->hbalock);
3765                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3766                 lpfc_sli_brdrestart(phba);
3767                 rc = lpfc_sli_chipset_init(phba);
3768                 if (rc)
3769                         break;
3770
3771                 spin_lock_irq(&phba->hbalock);
3772                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3773                 spin_unlock_irq(&phba->hbalock);
3774                 resetcount++;
3775
3776                 /* Call pre CONFIG_PORT mailbox command initialization.  A
3777                  * value of 0 means the call was successful.  Any other
3778                  * nonzero value is a failure, but if ERESTART is returned,
3779                  * the driver may reset the HBA and try again.
3780                  */
3781                 rc = lpfc_config_port_prep(phba);
3782                 if (rc == -ERESTART) {
3783                         phba->link_state = LPFC_LINK_UNKNOWN;
3784                         continue;
3785                 } else if (rc)
3786                         break;
3787                 phba->link_state = LPFC_INIT_MBX_CMDS;
3788                 lpfc_config_port(phba, pmb);
3789                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
3790                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
3791                                         LPFC_SLI3_HBQ_ENABLED |
3792                                         LPFC_SLI3_CRP_ENABLED |
3793                                         LPFC_SLI3_INB_ENABLED |
3794                                         LPFC_SLI3_BG_ENABLED);
3795                 if (rc != MBX_SUCCESS) {
3796                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3797                                 "0442 Adapter failed to init, mbxCmd x%x "
3798                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
3799                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
3800                         spin_lock_irq(&phba->hbalock);
3801                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
3802                         spin_unlock_irq(&phba->hbalock);
3803                         rc = -ENXIO;
3804                 } else {
3805                         /* Allow asynchronous mailbox command to go through */
3806                         spin_lock_irq(&phba->hbalock);
3807                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
3808                         spin_unlock_irq(&phba->hbalock);
3809                         done = 1;
3810                 }
3811         }
3812         if (!done) {
3813                 rc = -EINVAL;
3814                 goto do_prep_failed;
3815         }
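        /*
         * Inspect the CONFIG_PORT response to see which optional SLI-3
         * features (NPIV, HBQs, CRP, inband, BlockGuard) the firmware
         * granted.
         */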
3816         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
3817                 if (!pmb->u.mb.un.varCfgPort.cMA) {
3818                         rc = -ENXIO;
3819                         goto do_prep_failed;
3820                 }
3821                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
3822                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3823                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
3824                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
3825                                 phba->max_vpi : phba->max_vports;
3826
3827                 } else
3828                         phba->max_vpi = 0;
3829                 if (pmb->u.mb.un.varCfgPort.gdss)
3830                         phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
3831                 if (pmb->u.mb.un.varCfgPort.gerbm)
3832                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3833                 if (pmb->u.mb.un.varCfgPort.gcrp)
3834                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3835                 if (pmb->u.mb.un.varCfgPort.ginb) {
3836                         phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3837                         phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3838                         phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
3839                         phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
3840                         phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
3841                         phba->inb_last_counter =
3842                                         phba->mbox->us.s3_inb_pgp.counter;
3843                 } else {
3844                         phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
3845                         phba->port_gp = phba->mbox->us.s3_pgp.port;
3846                         phba->inb_ha_copy = NULL;
3847                         phba->inb_counter = NULL;
3848                 }
3849
3850                 if (phba->cfg_enable_bg) {
3851                         if (pmb->u.mb.un.varCfgPort.gbg)
3852                                 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3853                         else
3854                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3855                                                 "0443 Adapter did not grant "
3856                                                 "BlockGuard\n");
3857                 }
3858         } else {
3859                 phba->hbq_get = NULL;
3860                 phba->port_gp = phba->mbox->us.s2.port;
3861                 phba->inb_ha_copy = NULL;
3862                 phba->inb_counter = NULL;
3863                 phba->max_vpi = 0;
3864         }
3865 do_prep_failed:
3866         mempool_free(pmb, phba->mbox_mem_pool);
3867         return rc;
3868 }
3869
3870
3871 /**
3872  * lpfc_sli_hba_setup - SLI initialization function
3873  * @phba: Pointer to HBA context object.
3874  *
3875  * This function is the main SLI initialization function. This function
3876  * is called by the HBA initialization code, HBA reset code and HBA
3877  * error attention handler code. Caller is not required to hold any
3878  * locks. This function issues config_port mailbox command to configure
3879  * the SLI, setup iocb rings and HBQ rings. In the end the function
3880  * calls the config_port_post function to issue init_link mailbox
3881  * command and to start the discovery. The function will return zero
3882  * if successful, else it will return negative error code.
3883  **/
3884 int
3885 lpfc_sli_hba_setup(struct lpfc_hba *phba)
3886 {
3887         uint32_t rc;
3888         int  mode = 3;
3889
3890         switch (lpfc_sli_mode) {
3891         case 2:
3892                 if (phba->cfg_enable_npiv) {
3893                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
3894                                 "1824 NPIV enabled: Override lpfc_sli_mode "
3895                                 "parameter (%d) to auto (0).\n",
3896                                 lpfc_sli_mode);
3897                         break;
3898                 }
3899                 mode = 2;
3900                 break;
3901         case 0:
3902         case 3:
3903                 break;
3904         default:
3905                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
3906                                 "1819 Unrecognized lpfc_sli_mode "
3907                                 "parameter: %d.\n", lpfc_sli_mode);
3908
3909                 break;
3910         }
3911
3912         rc = lpfc_sli_config_port(phba, mode);
3913
3914         if (rc && lpfc_sli_mode == 3)
3915                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
3916                                 "1820 Unable to select SLI-3.  "
3917                                 "Not supported by adapter.\n");
3918         if (rc && mode != 2)
3919                 rc = lpfc_sli_config_port(phba, 2);
3920         if (rc)
3921                 goto lpfc_sli_hba_setup_error;
3922
3923         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
3924         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
3925                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
3926                 if (!rc) {
3927                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3928                                         "2709 This device supports "
3929                                         "Advanced Error Reporting (AER)\n");
3930                         spin_lock_irq(&phba->hbalock);
3931                         phba->hba_flag |= HBA_AER_ENABLED;
3932                         spin_unlock_irq(&phba->hbalock);
3933                 } else {
3934                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3935                                         "2708 This device does not support "
3936                                         "Advanced Error Reporting (AER)\n");
3937                         phba->cfg_aer_support = 0;
3938                 }
3939         }
3940
3941         if (phba->sli_rev == 3) {
3942                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
3943                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
3944         } else {
3945                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
3946                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
3947                 phba->sli3_options = 0;
3948         }
3949
3950         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3951                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
3952                         phba->sli_rev, phba->max_vpi);
3953         rc = lpfc_sli_ring_map(phba);
3954
3955         if (rc)
3956                 goto lpfc_sli_hba_setup_error;
3957
3958         /* Init HBQs */
3959         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3960                 rc = lpfc_sli_hbq_setup(phba);
3961                 if (rc)
3962                         goto lpfc_sli_hba_setup_error;
3963         }
3964         spin_lock_irq(&phba->hbalock);
3965         phba->sli.sli_flag |= LPFC_PROCESS_LA;
3966         spin_unlock_irq(&phba->hbalock);
3967
3968         rc = lpfc_config_port_post(phba);
3969         if (rc)
3970                 goto lpfc_sli_hba_setup_error;
3971
3972         return rc;
3973
3974 lpfc_sli_hba_setup_error:
3975         phba->link_state = LPFC_HBA_ERROR;
3976         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3977                         "0445 Firmware initialization failed\n");
3978         return rc;
3979 }
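
/*
 * Usage sketch (illustrative only, not part of the driver): callers in
 * the bring-up, reset and error-attention paths treat a non-zero return
 * as fatal for the port, e.g.:
 *
 *	rc = lpfc_sli_hba_setup(phba);
 *	if (rc)
 *		return rc;	(link_state is already LPFC_HBA_ERROR)
 *
 * The error label above has already set phba->link_state, so callers
 * only need to propagate the negative error code.
 */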
3980
3981 /**
3982  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
3983  * @phba: Pointer to HBA context object.
3984  * @mboxq: mailbox pointer.
3985  * This function issues a dump mailbox command to read config region
3986  * 23, parses the records in the region and populates the driver
3987  * data structures.
3988  **/
3989 static int
3990 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
3991                 LPFC_MBOXQ_t *mboxq)
3992 {
3993         struct lpfc_dmabuf *mp;
3994         struct lpfc_mqe *mqe;
3995         uint32_t data_length;
3996         int rc;
3997
3998         /* Program the default value of vlan_id and fc_map */
3999         phba->valid_vlan = 0;
4000         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4001         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4002         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4003
4004         mqe = &mboxq->u.mqe;
4005         if (lpfc_dump_fcoe_param(phba, mboxq))
4006                 return -ENOMEM;
4007
4008         mp = (struct lpfc_dmabuf *) mboxq->context1;
4009         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4010
4011         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4012                         "(%d):2571 Mailbox cmd x%x Status x%x "
4013                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4014                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4015                         "CQ: x%x x%x x%x x%x\n",
4016                         mboxq->vport ? mboxq->vport->vpi : 0,
4017                         bf_get(lpfc_mqe_command, mqe),
4018                         bf_get(lpfc_mqe_status, mqe),
4019                         mqe->un.mb_words[0], mqe->un.mb_words[1],
4020                         mqe->un.mb_words[2], mqe->un.mb_words[3],
4021                         mqe->un.mb_words[4], mqe->un.mb_words[5],
4022                         mqe->un.mb_words[6], mqe->un.mb_words[7],
4023                         mqe->un.mb_words[8], mqe->un.mb_words[9],
4024                         mqe->un.mb_words[10], mqe->un.mb_words[11],
4025                         mqe->un.mb_words[12], mqe->un.mb_words[13],
4026                         mqe->un.mb_words[14], mqe->un.mb_words[15],
4027                         mqe->un.mb_words[16], mqe->un.mb_words[50],
4028                         mboxq->mcqe.word0,
4029                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
4030                         mboxq->mcqe.trailer);
4031
4032         if (rc) {
4033                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4034                 kfree(mp);
4035                 return -EIO;
4036         }
4037         data_length = mqe->un.mb_words[5];
4038         if (data_length > DMP_RGN23_SIZE) {
4039                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4040                 kfree(mp);
4041                 return -EIO;
4042         }
4043
4044         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4045         lpfc_mbuf_free(phba, mp->virt, mp->phys);
4046         kfree(mp);
4047         return 0;
4048 }
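
/*
 * Note (illustrative): the defaults seeded above are the standard FCoE
 * FC-MAP 0x0E:0xFC:0x00, assuming LPFC_FCOE_FCF_MAP0/1/2 carry those
 * values. Because the defaults are programmed before the mailbox is
 * touched, a caller may treat failure here as non-fatal and continue
 * with them, as lpfc_sli4_hba_setup() below does:
 *
 *	if (lpfc_sli4_read_fcoe_params(phba, mboxq))
 *		(log the failure and carry on with the defaults)
 */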
4049
4050 /**
4051  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4052  * @phba: pointer to lpfc hba data structure.
4053  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4054  * @vpd: pointer to the memory to hold resulting port vpd data.
4055  * @vpd_size: On input, the number of bytes allocated to @vpd.
4056  *            On output, the number of data bytes in @vpd.
4057  *
4058  * This routine executes a READ_REV SLI4 mailbox command.  In
4059  * addition, this routine gets the port vpd data.
4060  *
4061  * Return codes
4062  *      0 - successful
4063  *      ENOMEM - could not allocate memory.
4064  **/
4065 static int
4066 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4067                     uint8_t *vpd, uint32_t *vpd_size)
4068 {
4069         int rc = 0;
4070         uint32_t dma_size;
4071         struct lpfc_dmabuf *dmabuf;
4072         struct lpfc_mqe *mqe;
4073
4074         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4075         if (!dmabuf)
4076                 return -ENOMEM;
4077
4078         /*
4079          * Get a DMA buffer for the vpd data resulting from the READ_REV
4080          * mailbox command.
4081          */
4082         dma_size = *vpd_size;
4083         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4084                                           dma_size,
4085                                           &dmabuf->phys,
4086                                           GFP_KERNEL);
4087         if (!dmabuf->virt) {
4088                 kfree(dmabuf);
4089                 return -ENOMEM;
4090         }
4091         memset(dmabuf->virt, 0, dma_size);
4092
4093         /*
4094          * The SLI4 implementation of READ_REV conflicts at word1,
4095          * bits 31:16 and SLI4 adds vpd functionality not present
4096          * in SLI3.  This code corrects the conflicts.
4097          */
4098         lpfc_read_rev(phba, mboxq);
4099         mqe = &mboxq->u.mqe;
4100         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4101         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4102         mqe->un.read_rev.word1 &= 0x0000FFFF;
4103         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4104         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4105
4106         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4107         if (rc) {
4108                 dma_free_coherent(&phba->pcidev->dev, dma_size,
4109                                   dmabuf->virt, dmabuf->phys);
4110                 kfree(dmabuf);
                     return -EIO;
4111         }
4112
4113         /*
4114          * The available vpd length cannot be bigger than the
4115          * DMA buffer passed to the port.  Catch the less than
4116          * case and update the caller's size.
4117          */
4118         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4119                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4120
4121         lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4122         dma_free_coherent(&phba->pcidev->dev, dma_size,
4123                           dmabuf->virt, dmabuf->phys);
4124         kfree(dmabuf);
4125         return 0;
4126 }
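
/*
 * Usage sketch (illustrative): the caller provides the vpd buffer and
 * its allocated size, and receives the actual vpd length back through
 * @vpd_size:
 *
 *	vpd_size = PAGE_SIZE;
 *	vpd = kzalloc(vpd_size, GFP_KERNEL);
 *	if (!vpd)
 *		return -ENOMEM;
 *	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
 *	(on success, vpd[0..vpd_size-1] holds the port vpd data)
 */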
4127
4128 /**
4129  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4130  * @phba: pointer to lpfc hba data structure.
4131  *
4132  * This routine is called to explicitly arm the SLI4 device's completion and
4133  * event queues.
4134  **/
4135 static void
4136 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4137 {
4138         uint8_t fcp_eqidx;
4139
4140         lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4141         lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4142         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4143                 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4144                                      LPFC_QUEUE_REARM);
4145         lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4146         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4147                 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4148                                      LPFC_QUEUE_REARM);
4149 }
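
/*
 * Note (illustrative): releasing a queue with LPFC_QUEUE_REARM writes
 * the queue doorbell with the arm bit set, so the port will interrupt
 * when the next entry is posted. The CQs are rearmed before the EQs
 * here, matching the "Arm the CQs and then EQs" ordering used by the
 * setup path below.
 */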
4150
4151 /**
4152  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4153  * @phba: Pointer to HBA context object.
4154  *
4155  * This function is the main SLI4 device initialization PCI function. This
4156  * function is called by the HBA initialization code, HBA reset code and
4157  * HBA error attention handler code. Caller is not required to hold any
4158  * locks.
4159  **/
4160 int
4161 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4162 {
4163         int rc;
4164         LPFC_MBOXQ_t *mboxq;
4165         struct lpfc_mqe *mqe;
4166         uint8_t *vpd;
4167         uint32_t vpd_size;
4168         uint32_t ftr_rsp = 0;
4169         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4170         struct lpfc_vport *vport = phba->pport;
4171         struct lpfc_dmabuf *mp;
4172
4173         /* Perform a PCI function reset to start from clean */
4174         rc = lpfc_pci_function_reset(phba);
4175         if (unlikely(rc))
4176                 return -ENODEV;
4177
4178         /* Check the HBA Host Status Register for readiness */
4179         rc = lpfc_sli4_post_status_check(phba);
4180         if (unlikely(rc))
4181                 return -ENODEV;
4182         else {
4183                 spin_lock_irq(&phba->hbalock);
4184                 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4185                 spin_unlock_irq(&phba->hbalock);
4186         }
4187
4188         /*
4189          * Allocate a single mailbox container for initializing the
4190          * port.
4191          */
4192         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4193         if (!mboxq)
4194                 return -ENOMEM;
4195
4196         /*
4197          * Continue initialization with default values even if driver failed
4198          * to read FCoE param config regions
4199          */
4200         if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4201                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4202                         "2570 Failed to read FCoE parameters\n");
4203
4204         /* Issue READ_REV to collect vpd and FW information. */
4205         vpd_size = PAGE_SIZE;
4206         vpd = kzalloc(vpd_size, GFP_KERNEL);
4207         if (!vpd) {
4208                 rc = -ENOMEM;
4209                 goto out_free_mbox;
4210         }
4211
4212         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4213         if (unlikely(rc))
4214                 goto out_free_vpd;
4215
4216         mqe = &mboxq->u.mqe;
4217         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4218         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4219                 phba->hba_flag |= HBA_FCOE_SUPPORT;
4220
4221         if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
4222                 LPFC_DCBX_CEE_MODE)
4223                 phba->hba_flag |= HBA_FIP_SUPPORT;
4224         else
4225                 phba->hba_flag &= ~HBA_FIP_SUPPORT;
4226
4227         if (phba->sli_rev != LPFC_SLI_REV4 ||
4228             !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
4229                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4230                         "0376 READ_REV Error. SLI Level %d "
4231                         "FCoE enabled %d\n",
4232                         phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
4233                 rc = -EIO;
4234                 goto out_free_vpd;
4235         }
4236         /*
4237          * Evaluate the read rev and vpd data. Populate the driver
4238          * state with the results. If this routine fails, the failure
4239          * is not fatal as the driver will use generic values.
4240          */
4241         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4242         if (unlikely(!rc)) {
4243                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4244                                 "0377 Error %d parsing vpd. "
4245                                 "Using defaults.\n", rc);
4246                 rc = 0;
4247         }
4248
4249         /* Save information as VPD data */
4250         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
4251         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
4252         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
4253         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
4254                                          &mqe->un.read_rev);
4255         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
4256                                        &mqe->un.read_rev);
4257         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
4258                                             &mqe->un.read_rev);
4259         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
4260                                            &mqe->un.read_rev);
4261         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
4262         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
4263         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
4264         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
4265         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
4266         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
4267         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4268                         "(%d):0380 READ_REV Status x%x "
4269                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
4270                         mboxq->vport ? mboxq->vport->vpi : 0,
4271                         bf_get(lpfc_mqe_status, mqe),
4272                         phba->vpd.rev.opFwName,
4273                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
4274                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
4275
4276         /*
4277          * Discover the port's supported feature set and match it against the
4278          * hosts requests.
4279          */
4280         lpfc_request_features(phba, mboxq);
4281         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4282         if (unlikely(rc)) {
4283                 rc = -EIO;
4284                 goto out_free_vpd;
4285         }
4286
4287         /*
4288          * The port must support FCP initiator mode as this is the
4289          * only mode running in the host.
4290          */
4291         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4292                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4293                                 "0378 No support for fcpi mode.\n");
4294                 ftr_rsp++;
4295         }
4296
4297         /*
4298          * If the port cannot support the host's requested features
4299          * then turn off the global config parameters to disable the
4300          * feature in the driver.  This is not a fatal error.
4301          */
4302         if ((phba->cfg_enable_bg) &&
4303             !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4304                 ftr_rsp++;
4305
4306         if (phba->max_vpi && phba->cfg_enable_npiv &&
4307             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4308                 ftr_rsp++;
4309
4310         if (ftr_rsp) {
4311                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4312                                 "0379 Feature Mismatch Data: x%08x %08x "
4313                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4314                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4315                                 phba->cfg_enable_npiv, phba->max_vpi);
4316                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4317                         phba->cfg_enable_bg = 0;
4318                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4319                         phba->cfg_enable_npiv = 0;
4320         }
4321
4322         /* These SLI3 features are assumed in SLI4 */
4323         spin_lock_irq(&phba->hbalock);
4324         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4325         spin_unlock_irq(&phba->hbalock);
4326
4327         /* Read the port's service parameters. */
4328         lpfc_read_sparam(phba, mboxq, vport->vpi);
4329         mboxq->vport = vport;
4330         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4331         mp = (struct lpfc_dmabuf *) mboxq->context1;
4332         if (rc == MBX_SUCCESS) {
4333                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4334                 rc = 0;
4335         }
4336
4337         /*
4338          * This memory was allocated by the lpfc_read_sparam routine. Release
4339          * it to the mbuf pool.
4340          */
4341         lpfc_mbuf_free(phba, mp->virt, mp->phys);
4342         kfree(mp);
4343         mboxq->context1 = NULL;
4344         if (unlikely(rc)) {
4345                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4346                                 "0382 READ_SPARAM command failed "
4347                                 "status %d, mbxStatus x%x\n",
4348                                 rc, bf_get(lpfc_mqe_status, mqe));
4349                 phba->link_state = LPFC_HBA_ERROR;
4350                 rc = -EIO;
4351                 goto out_free_vpd;
4352         }
4353
4354         if (phba->cfg_soft_wwnn)
4355                 u64_to_wwn(phba->cfg_soft_wwnn,
4356                            vport->fc_sparam.nodeName.u.wwn);
4357         if (phba->cfg_soft_wwpn)
4358                 u64_to_wwn(phba->cfg_soft_wwpn,
4359                            vport->fc_sparam.portName.u.wwn);
4360         memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4361                sizeof(struct lpfc_name));
4362         memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4363                sizeof(struct lpfc_name));
4364
4365         /* Update the fc_host data structures with new wwn. */
4366         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4367         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4368
4369         /* Register SGL pool to the device using non-embedded mailbox command */
4370         rc = lpfc_sli4_post_sgl_list(phba);
4371         if (unlikely(rc)) {
4372                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4373                                 "0582 Error %d during sgl post operation\n",
4374                                         rc);
4375                 rc = -ENODEV;
4376                 goto out_free_vpd;
4377         }
4378
4379         /* Register SCSI SGL pool to the device */
4380         rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4381         if (unlikely(rc)) {
4382                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4383                                 "0383 Error %d during scsi sgl post "
4384                                 "operation\n", rc);
4385                 /* Some Scsi buffers were moved to the abort scsi list */
4386                 /* A pci function reset will repost them */
4387                 rc = -ENODEV;
4388                 goto out_free_vpd;
4389         }
4390
4391         /* Post the rpi header region to the device. */
4392         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4393         if (unlikely(rc)) {
4394                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4395                                 "0393 Error %d during rpi post operation\n",
4396                                 rc);
4397                 rc = -ENODEV;
4398                 goto out_free_vpd;
4399         }
4400
4401         /* Set up all the queues to the device */
4402         rc = lpfc_sli4_queue_setup(phba);
4403         if (unlikely(rc)) {
4404                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4405                                 "0381 Error %d during queue setup.\n", rc);
4406                 goto out_stop_timers;
4407         }
4408
4409         /* Arm the CQs and then EQs on device */
4410         lpfc_sli4_arm_cqeq_intr(phba);
4411
4412         /* Indicate device interrupt mode */
4413         phba->sli4_hba.intr_enable = 1;
4414
4415         /* Allow asynchronous mailbox command to go through */
4416         spin_lock_irq(&phba->hbalock);
4417         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4418         spin_unlock_irq(&phba->hbalock);
4419
4420         /* Post receive buffers to the device */
4421         lpfc_sli4_rb_setup(phba);
4422
4423         /* Start the ELS watchdog timer */
4424         mod_timer(&vport->els_tmofunc,
4425                   jiffies + HZ * (phba->fc_ratov * 2));
4426
4427         /* Start heart beat timer */
4428         mod_timer(&phba->hb_tmofunc,
4429                   jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4430         phba->hb_outstanding = 0;
4431         phba->last_completion_time = jiffies;
4432
4433         /* Start error attention (ERATT) polling timer */
4434         mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4435
4436         /*
4437          * The port is ready, set the host's link state to LINK_DOWN
4438          * in preparation for link interrupts.
4439          */
4440         lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4441         mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4442         lpfc_set_loopback_flag(phba);
4443         /* Change driver state to LPFC_LINK_DOWN right before init link */
4444         spin_lock_irq(&phba->hbalock);
4445         phba->link_state = LPFC_LINK_DOWN;
4446         spin_unlock_irq(&phba->hbalock);
4447         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4448         if (unlikely(rc != MBX_NOT_FINISHED)) {
4449                 kfree(vpd);
4450                 return 0;
4451         } else
4452                 rc = -EIO;
4453
4454         /* Unset all the queues set up in this routine on error */
4455         if (rc)
4456                 lpfc_sli4_queue_unset(phba);
4457
4458 out_stop_timers:
4459         if (rc)
4460                 lpfc_stop_hba_timers(phba);
4461 out_free_vpd:
4462         kfree(vpd);
4463 out_free_mbox:
4464         mempool_free(mboxq, phba->mbox_mem_pool);
4465         return rc;
4466 }
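
/*
 * Usage sketch (illustrative): as with the SLI-3 setup path, callers
 * invoke this from bring-up/reset and propagate the negative errno:
 *
 *	rc = lpfc_sli4_hba_setup(phba);
 *	if (rc)
 *		(tear down or retry the reset; the port is not usable)
 *
 * On success the INIT_LINK mailbox has been posted with MBX_NOWAIT, so
 * link events arrive asynchronously through the interrupt path.
 */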
4467
4468 /**
4469  * lpfc_mbox_timeout - Timeout call back function for mbox timer
4470  * @ptr: context object - pointer to hba structure.
4471  *
4472  * This is the callback function for the mailbox timer. The mailbox
4473  * timer is armed when a new mailbox command is issued and the timer
4474  * is deleted when the mailbox completes. The function is called by
4475  * the kernel timer code when a mailbox does not complete within
4476  * expected time. This function wakes up the worker thread to
4477  * process the mailbox timeout and returns. All the processing is
4478  * done by the worker thread function lpfc_mbox_timeout_handler.
4479  **/
4480 void
4481 lpfc_mbox_timeout(unsigned long ptr)
4482 {
4483         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
4484         unsigned long iflag;
4485         uint32_t tmo_posted;
4486
4487         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
4488         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
4489         if (!tmo_posted)
4490                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
4491         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
4492
4493         if (!tmo_posted)
4494                 lpfc_worker_wake_up(phba);
4495         return;
4496 }
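
/*
 * Sketch (illustrative, assuming the usual timer API of this era): the
 * mailbox timer is wired up once at init time and armed per command,
 * roughly:
 *
 *	setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout,
 *		    (unsigned long)phba);
 *	...
 *	mod_timer(&psli->mbox_tmo, jiffies +
 *		  HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand));
 *
 * The completion path deletes the timer, so this callback only fires
 * when a mailbox command overstays its per-command timeout.
 */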
4497
4498
4499 /**
4500  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
4501  * @phba: Pointer to HBA context object.
4502  *
4503  * This function is called from worker thread when a mailbox command times out.
4504  * The caller is not required to hold any locks. This function will reset the
4505  * HBA and recover all the pending commands.
4506  **/
4507 void
4508 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
4509 {
4510         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
4511         MAILBOX_t *mb = &pmbox->u.mb;
4512         struct lpfc_sli *psli = &phba->sli;
4513         struct lpfc_sli_ring *pring;
4514
4515         /* Check the pmbox pointer first.  There is a race condition
4516          * between the mbox timeout handler getting executed in the
4517          * worklist and the mailbox actually completing. When this
4518          * race condition occurs, the mbox_active will be NULL.
4519          */
4520         spin_lock_irq(&phba->hbalock);
4521         if (pmbox == NULL) {
4522                 lpfc_printf_log(phba, KERN_WARNING,
4523                                 LOG_MBOX | LOG_SLI,
4524                                 "0353 Active Mailbox cleared - mailbox timeout "
4525                                 "exiting\n");
4526                 spin_unlock_irq(&phba->hbalock);
4527                 return;
4528         }
4529
4530         /* Mbox cmd <mbxCommand> timeout */
4531         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4532                         "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
4533                         mb->mbxCommand,
4534                         phba->pport->port_state,
4535                         phba->sli.sli_flag,
4536                         phba->sli.mbox_active);
4537         spin_unlock_irq(&phba->hbalock);
4538
4539         /* Setting state unknown so lpfc_sli_abort_iocb_ring
4540          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
4541          * it to fail all outstanding SCSI IO.
4542          */
4543         spin_lock_irq(&phba->pport->work_port_lock);
4544         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4545         spin_unlock_irq(&phba->pport->work_port_lock);
4546         spin_lock_irq(&phba->hbalock);
4547         phba->link_state = LPFC_LINK_UNKNOWN;
4548         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4549         spin_unlock_irq(&phba->hbalock);
4550
4551         pring = &psli->ring[psli->fcp_ring];
4552         lpfc_sli_abort_iocb_ring(phba, pring);
4553
4554         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4555                         "0345 Resetting board due to mailbox timeout\n");
4556
4557         /* Reset the HBA device */
4558         lpfc_reset_hba(phba);
4559 }
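
/*
 * Sketch (illustrative): the worker thread dispatches the timeout
 * roughly as follows (names per the event flag used above):
 *
 *	if (phba->pport->work_port_events & WORKER_MBOX_TMO)
 *		lpfc_mbox_timeout_handler(phba);
 *
 * so the heavy lifting (aborting the FCP ring, resetting the HBA)
 * happens in process context rather than in the timer callback.
 */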
4560
4561 /**
4562  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
4563  * @phba: Pointer to HBA context object.
4564  * @pmbox: Pointer to mailbox object.
4565  * @flag: Flag indicating how the mailbox needs to be processed.
4566  *
4567  * This function is called by discovery code and HBA management code
4568  * to submit a mailbox command to firmware with SLI-3 interface spec. This
4569  * function gets the hbalock to protect the data structures.
4570  * The mailbox command can be submitted in polling mode, in which case
4571  * this function will wait in a polling loop for the completion of the
4572  * mailbox.
4573  * If the mailbox is submitted in no_wait mode (not polling) the
4574  * function will submit the command and returns immediately without waiting
4575  * for the mailbox completion. The no_wait is supported only when HBA
4576  * is in SLI2/SLI3 mode - interrupts are enabled.
4577  * The SLI interface allows only one mailbox pending at a time. If the
4578  * mailbox is issued in polling mode and there is already a mailbox
4579  * pending, then the function will return an error. If the mailbox is issued
4580  * in NO_WAIT mode and there is a mailbox pending already, the function
4581  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
4582  * The sli layer owns the mailbox object until the completion of mailbox
4583  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
4584  * return codes the caller owns the mailbox command after the return of
4585  * the function.
4586  **/
4587 static int
4588 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4589                        uint32_t flag)
4590 {
4591         MAILBOX_t *mb;
4592         struct lpfc_sli *psli = &phba->sli;
4593         uint32_t status, evtctr;
4594         uint32_t ha_copy;
4595         int i;
4596         unsigned long timeout;
4597         unsigned long drvr_flag = 0;
4598         uint32_t word0, ldata;
4599         void __iomem *to_slim;
4600         int processing_queue = 0;
4601
4602         spin_lock_irqsave(&phba->hbalock, drvr_flag);
4603         if (!pmbox) {
4604                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4605                 /* processing mbox queue from intr_handler */
4606                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4607                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4608                         return MBX_SUCCESS;
4609                 }
4610                 processing_queue = 1;
4611                 pmbox = lpfc_mbox_get(phba);
4612                 if (!pmbox) {
4613                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4614                         return MBX_SUCCESS;
4615                 }
4616         }
4617
4618         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
4619                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
4620                 if (!pmbox->vport) {
4621                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4622                         lpfc_printf_log(phba, KERN_ERR,
4623                                         LOG_MBOX | LOG_VPORT,
4624                                         "1806 Mbox x%x failed. No vport\n",
4625                                         pmbox->u.mb.mbxCommand);
4626                         dump_stack();
4627                         goto out_not_finished;
4628                 }
4629         }
4630
4631         /* If the PCI channel is in offline state, do not post mbox. */
4632         if (unlikely(pci_channel_offline(phba->pcidev))) {
4633                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4634                 goto out_not_finished;
4635         }
4636
4637         /* If HBA has a deferred error attention, fail the iocb. */
4638         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
4639                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4640                 goto out_not_finished;
4641         }
4642
4643         psli = &phba->sli;
4644
4645         mb = &pmbox->u.mb;
4646         status = MBX_SUCCESS;
4647
4648         if (phba->link_state == LPFC_HBA_ERROR) {
4649                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4650
4651                 /* Mbox command <mbxCommand> cannot issue */
4652                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4653                                 "(%d):0311 Mailbox command x%x cannot "
4654                                 "issue Data: x%x x%x\n",
4655                                 pmbox->vport ? pmbox->vport->vpi : 0,
4656                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
4657                 goto out_not_finished;
4658         }
4659
4660         if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
4661             !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
4662                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4663                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4664                                 "(%d):2528 Mailbox command x%x cannot "
4665                                 "issue Data: x%x x%x\n",
4666                                 pmbox->vport ? pmbox->vport->vpi : 0,
4667                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
4668                 goto out_not_finished;
4669         }
4670
4671         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
4672                 /* Polling for a mbox command when another one is already active
4673                  * is not allowed in SLI. Also, the driver must have established
4674                  * SLI2 mode to queue and process multiple mbox commands.
4675                  */
4676
4677                 if (flag & MBX_POLL) {
4678                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4679
4680                         /* Mbox command <mbxCommand> cannot issue */
4681                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4682                                         "(%d):2529 Mailbox command x%x "
4683                                         "cannot issue Data: x%x x%x\n",
4684                                         pmbox->vport ? pmbox->vport->vpi : 0,
4685                                         pmbox->u.mb.mbxCommand,
4686                                         psli->sli_flag, flag);
4687                         goto out_not_finished;
4688                 }
4689
4690                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
4691                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4692                         /* Mbox command <mbxCommand> cannot issue */
4693                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4694                                         "(%d):2530 Mailbox command x%x "
4695                                         "cannot issue Data: x%x x%x\n",
4696                                         pmbox->vport ? pmbox->vport->vpi : 0,
4697                                         pmbox->u.mb.mbxCommand,
4698                                         psli->sli_flag, flag);
4699                         goto out_not_finished;
4700                 }
4701
4702                 /* Another mailbox command is still being processed, queue this
4703                  * command to be processed later.
4704                  */
4705                 lpfc_mbox_put(phba, pmbox);
4706
4707                 /* Mbox cmd issue - BUSY */
4708                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4709                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
4710                                 "x%x x%x x%x x%x\n",
4711                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
4712                                 mb->mbxCommand, phba->pport->port_state,
4713                                 psli->sli_flag, flag);
4714
4715                 psli->slistat.mbox_busy++;
4716                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4717
4718                 if (pmbox->vport) {
4719                         lpfc_debugfs_disc_trc(pmbox->vport,
4720                                 LPFC_DISC_TRC_MBOX_VPORT,
4721                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
4722                                 (uint32_t)mb->mbxCommand,
4723                                 mb->un.varWords[0], mb->un.varWords[1]);
4724                 } else {
4726                         lpfc_debugfs_disc_trc(phba->pport,
4727                                 LPFC_DISC_TRC_MBOX,
4728                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
4729                                 (uint32_t)mb->mbxCommand,
4730                                 mb->un.varWords[0], mb->un.varWords[1]);
4731                 }
4732
4733                 return MBX_BUSY;
4734         }
4735
4736         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4737
4738         /* If we are not polling, we MUST be in SLI2 mode */
4739         if (flag != MBX_POLL) {
4740                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
4741                     (mb->mbxCommand != MBX_KILL_BOARD)) {
4742                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4743                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4744                         /* Mbox command <mbxCommand> cannot issue */
4745                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4746                                         "(%d):2531 Mailbox command x%x "
4747                                         "cannot issue Data: x%x x%x\n",
4748                                         pmbox->vport ? pmbox->vport->vpi : 0,
4749                                         pmbox->u.mb.mbxCommand,
4750                                         psli->sli_flag, flag);
4751                         goto out_not_finished;
4752                 }
4753                 /* timeout active mbox command */
4754                 mod_timer(&psli->mbox_tmo, (jiffies +
4755                                (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
4756         }
4757
4758         /* Mailbox cmd <cmd> issue */
4759         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4760                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
4761                         "x%x\n",
4762                         pmbox->vport ? pmbox->vport->vpi : 0,
4763                         mb->mbxCommand, phba->pport->port_state,
4764                         psli->sli_flag, flag);
4765
4766         if (mb->mbxCommand != MBX_HEARTBEAT) {
4767                 if (pmbox->vport) {
4768                         lpfc_debugfs_disc_trc(pmbox->vport,
4769                                 LPFC_DISC_TRC_MBOX_VPORT,
4770                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
4771                                 (uint32_t)mb->mbxCommand,
4772                                 mb->un.varWords[0], mb->un.varWords[1]);
4773                 } else {
4775                         lpfc_debugfs_disc_trc(phba->pport,
4776                                 LPFC_DISC_TRC_MBOX,
4777                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
4778                                 (uint32_t)mb->mbxCommand,
4779                                 mb->un.varWords[0], mb->un.varWords[1]);
4780                 }
4781         }
4782
4783         psli->slistat.mbox_cmd++;
4784         evtctr = psli->slistat.mbox_event;
4785
4786         /* next set own bit for the adapter and copy over command word */
4787         mb->mbxOwner = OWN_CHIP;
4788
4789         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
4790                 /* First copy command data to host SLIM area */
4791                 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
4792         } else {
4793                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
4794                         /* copy command data into host mbox for cmpl */
4795                         lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
4796                 }
4797
4798                 /* First copy mbox command data to HBA SLIM, skip past first
4799                    word */
4800                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4801                 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
4802                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
4803
4804                 /* Next copy over first word, with mbxOwner set */
4805                 ldata = *((uint32_t *)mb);
4806                 to_slim = phba->MBslimaddr;
4807                 writel(ldata, to_slim);
4808                 readl(to_slim); /* flush */
4809
4810                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
4811                         /* switch over to host mailbox */
4812                         psli->sli_flag |= LPFC_SLI_ACTIVE;
4813                 }
4814         }
4815
4816         wmb();
4817
4818         switch (flag) {
4819         case MBX_NOWAIT:
4820                 /* Set up reference to mailbox command */
4821                 psli->mbox_active = pmbox;
4822                 /* Interrupt board to do it */
4823                 writel(CA_MBATT, phba->CAregaddr);
4824                 readl(phba->CAregaddr); /* flush */
4825                 /* Don't wait for it to finish, just return */
4826                 break;
4827
4828         case MBX_POLL:
4829                 /* Set up null reference to mailbox command */
4830                 psli->mbox_active = NULL;
4831                 /* Interrupt board to do it */
4832                 writel(CA_MBATT, phba->CAregaddr);
4833                 readl(phba->CAregaddr); /* flush */
4834
4835                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
4836                         /* First read mbox status word */
4837                         word0 = *((uint32_t *)phba->mbox);
4838                         word0 = le32_to_cpu(word0);
4839                 } else {
4840                         /* First read mbox status word */
4841                         word0 = readl(phba->MBslimaddr);
4842                 }
4843
4844                 /* Read the HBA Host Attention Register */
4845                 ha_copy = readl(phba->HAregaddr);
4846                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
4847                                                              mb->mbxCommand) *
4848                                            1000) + jiffies;
4849                 i = 0;
4850                 /* Wait for command to complete */
4851                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
4852                        (!(ha_copy & HA_MBATT) &&
4853                         (phba->link_state > LPFC_WARM_START))) {
4854                         if (time_after(jiffies, timeout)) {
4855                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4856                                 spin_unlock_irqrestore(&phba->hbalock,
4857                                                        drvr_flag);
4858                                 goto out_not_finished;
4859                         }
4860
4861                         /* Check if we took a mbox interrupt while we were
4862                            polling */
4863                         if (((word0 & OWN_CHIP) != OWN_CHIP)
4864                             && (evtctr != psli->slistat.mbox_event))
4865                                 break;
4866
4867                         if (i++ > 10) {
4868                                 spin_unlock_irqrestore(&phba->hbalock,
4869                                                        drvr_flag);
4870                                 msleep(1);
4871                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
4872                         }
4873
4874                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
4875                                 /* First copy command data */
4876                                 word0 = *((uint32_t *)phba->mbox);
4877                                 word0 = le32_to_cpu(word0);
4878                                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
4879                                         MAILBOX_t *slimmb;
4880                                         uint32_t slimword0;
4881                                         /* Check real SLIM for any errors */
4882                                         slimword0 = readl(phba->MBslimaddr);
4883                                         slimmb = (MAILBOX_t *) & slimword0;
4884                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
4885                                             && slimmb->mbxStatus) {
4886                                                 psli->sli_flag &=
4887                                                     ~LPFC_SLI_ACTIVE;
4888                                                 word0 = slimword0;
4889                                         }
4890                                 }
4891                         } else {
4892                                 /* First copy command data */
4893                                 word0 = readl(phba->MBslimaddr);
4894                         }
4895                         /* Read the HBA Host Attention Register */
4896                         ha_copy = readl(phba->HAregaddr);
4897                 }
4898
4899                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
4900                         /* copy results back to user */
4901                         lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
4902                 } else {
4903                         /* First copy command data */
4904                         lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
4905                                                         MAILBOX_CMD_SIZE);
4906                         if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
4907                                 pmbox->context2) {
4908                                 lpfc_memcpy_from_slim((void *)pmbox->context2,
4909                                       phba->MBslimaddr + DMP_RSP_OFFSET,
4910                                                       mb->un.varDmp.word_cnt);
4911                         }
4912                 }
4913
4914                 writel(HA_MBATT, phba->HAregaddr);
4915                 readl(phba->HAregaddr); /* flush */
4916
4917                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4918                 status = mb->mbxStatus;
4919         }
4920
4921         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4922         return status;
4923
4924 out_not_finished:
4925         if (processing_queue) {
4926                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
4927                 lpfc_mbox_cmpl_put(phba, pmbox);
4928         }
4929         return MBX_NOT_FINISHED;
4930 }
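
/*
 * Usage sketch (illustrative): mailbox ownership follows the return
 * code exactly as the header comment above describes:
 *
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *	(on MBX_BUSY/MBX_SUCCESS the SLI layer owns pmb until its
 *	 completion handler runs)
 *
 * In MBX_POLL mode the command has completed (or timed out) by the
 * time this returns, so the caller always owns pmb again.
 */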
4931
4932 /**
4933  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
4934  * @phba: Pointer to HBA context object.
4935  *
4936  * The function blocks the posting of SLI4 asynchronous mailbox commands from
4937  * the driver internal pending mailbox queue. It will then try to wait out the
4938  * possible outstanding mailbox command before return.
4939  *
4940  * Returns:
4941  *      0 - the outstanding mailbox command completed
4942  *      1 - the wait for the outstanding mailbox command timed out
4943  **/
4944 static int
4945 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
4946 {
4947         struct lpfc_sli *psli = &phba->sli;
4948         uint8_t actcmd = MBX_HEARTBEAT;
4949         int rc = 0;
4950         unsigned long timeout;
4951
4952         /* Mark the asynchronous mailbox command posting as blocked */
4953         spin_lock_irq(&phba->hbalock);
4954         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
4955         if (phba->sli.mbox_active)
4956                 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
4957         spin_unlock_irq(&phba->hbalock);
4958         /* Determine how long we might wait for the active mailbox
4959          * command to be gracefully completed by firmware.
4960          */
4961         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
4962                                    jiffies;
4963         /* Wait for the outstanding mailbox command to complete */
4964         while (phba->sli.mbox_active) {
4965                 /* Check active mailbox complete status every 2ms */
4966                 msleep(2);
4967                 if (time_after(jiffies, timeout)) {
4968                         /* Timeout: mark the outstanding cmd as not complete */
4969                         rc = 1;
4970                         break;
4971                 }
4972         }
4973
4974         /* Could not cleanly block async mailbox commands; fail the block */
4975         if (rc) {
4976                 spin_lock_irq(&phba->hbalock);
4977                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4978                 spin_unlock_irq(&phba->hbalock);
4979         }
4980         return rc;
4981 }
4982
4983 /**
4984  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
4985  * @phba: Pointer to HBA context object.
4986  *
4987  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
4988  * commands from the driver internal pending mailbox queue. It makes sure
4989  * that there is no outstanding mailbox command before resuming posting
4990  * asynchronous mailbox commands. If, for any reason, there is outstanding
4991  * mailbox command, it will try to wait it out before resuming asynchronous
4992  * mailbox command posting.
4993  **/
4994 static void
4995 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
4996 {
4997         struct lpfc_sli *psli = &phba->sli;
4998
4999         spin_lock_irq(&phba->hbalock);
5000         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5001                 /* Asynchronous mailbox posting is not blocked, do nothing */
5002                 spin_unlock_irq(&phba->hbalock);
5003                 return;
5004         }
5005
5006         /* Any outstanding synchronous mailbox command is guaranteed to be
5007          * done by now, either successfully or by timeout; after a timeout
5008          * the outstanding command is always removed. So just unblock async
5009          * mailbox command posting and resume.
5010          */
5011         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5012         spin_unlock_irq(&phba->hbalock);
5013
5014         /* wake up worker thread to post asynchronous mailbox command */
5015         lpfc_worker_wake_up(phba);
5016 }
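
/*
 * Usage sketch (illustrative): block/unblock bracket work that must not
 * race with asynchronous mailbox posting:
 *
 *	if (!lpfc_sli4_async_mbox_block(phba)) {
 *		(do the synchronous/bootstrap mailbox work)
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 *
 * When the block cannot be established (return 1), the block routine
 * has already cleared LPFC_SLI_ASYNC_MBX_BLK itself, so no unblock is
 * required.
 */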
5017
5018 /**
5019  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5020  * @phba: Pointer to HBA context object.
5021  * @mboxq: Pointer to mailbox object.
5022  *
5023  * The function posts a mailbox to the port.  The mailbox is expected
5024  * to be comletely filled in and ready for the port to operate on it.
5025  * This routine executes a synchronous completion operation on the
5026  * mailbox by polling for its completion.
5027  *
5028  * The caller must not be holding any locks when calling this routine.
5029  *
5030  * Returns:
5031  *      MBX_SUCCESS - mailbox posted successfully
5032  *      Any of the MBX error values.
5033  **/
5034 static int
5035 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5036 {
5037         int rc = MBX_SUCCESS;
5038         unsigned long iflag;
5039         uint32_t db_ready;
5040         uint32_t mcqe_status;
5041         uint32_t mbx_cmnd;
5042         unsigned long timeout;
5043         struct lpfc_sli *psli = &phba->sli;
5044         struct lpfc_mqe *mb = &mboxq->u.mqe;
5045         struct lpfc_bmbx_create *mbox_rgn;
5046         struct dma_address *dma_address;
5047         struct lpfc_register bmbx_reg;
5048
5049         /*
5050          * Only one mailbox can be active to the bootstrap mailbox region
5051          * at a time and there is no queueing provided.
5052          */
5053         spin_lock_irqsave(&phba->hbalock, iflag);
5054         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5055                 spin_unlock_irqrestore(&phba->hbalock, iflag);
5056                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5057                                 "(%d):2532 Mailbox command x%x (x%x) "
5058                                 "cannot issue Data: x%x x%x\n",
5059                                 mboxq->vport ? mboxq->vport->vpi : 0,
5060                                 mboxq->u.mb.mbxCommand,
5061                                 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5062                                 psli->sli_flag, MBX_POLL);
5063                 return MBXERR_ERROR;
5064         }
5065         /* The server grabs the token and owns it until release */
5066         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5067         phba->sli.mbox_active = mboxq;
5068         spin_unlock_irqrestore(&phba->hbalock, iflag);
5069
5070         /*
5071          * Initialize the bootstrap memory region to avoid stale data areas
5072          * in the mailbox post.  Then copy the caller's mailbox contents to
5073          * the bmbx mailbox region.
5074          */
5075         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5076         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5077         lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5078                               sizeof(struct lpfc_mqe));
5079
5080         /* Post the high mailbox dma address to the port and wait for ready. */
5081         dma_address = &phba->sli4_hba.bmbx.dma_address;
5082         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5083
5084         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5085                                    * 1000) + jiffies;
5086         do {
5087                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5088                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5089                 if (!db_ready)
5090                         msleep(2);
5091
5092                 if (time_after(jiffies, timeout)) {
5093                         rc = MBXERR_ERROR;
5094                         goto exit;
5095                 }
5096         } while (!db_ready);
5097
5098         /* Post the low mailbox dma address to the port. */
5099         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5100         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5101                                    * 1000) + jiffies;
5102         do {
5103                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5104                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5105                 if (!db_ready)
5106                         msleep(2);
5107
5108                 if (time_after(jiffies, timeout)) {
5109                         rc = MBXERR_ERROR;
5110                         goto exit;
5111                 }
5112         } while (!db_ready);
5113
5114         /*
5115          * Read the CQ to ensure the mailbox has completed.
5116          * If so, update the mailbox status so that the upper layers
5117          * can complete the request normally.
5118          */
5119         lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5120                               sizeof(struct lpfc_mqe));
5121         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5122         lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5123                               sizeof(struct lpfc_mcqe));
5124         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5125
5126         /* Prefix the mailbox status with range x4000 to note SLI4 status. */
5127         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5128                 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5129                 rc = MBXERR_ERROR;
5130         }
5131
5132         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5133                         "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5134                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5135                         " x%x x%x CQ: x%x x%x x%x x%x\n",
5136                         mboxq->vport ? mboxq->vport->vpi : 0,
5137                         mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5138                         bf_get(lpfc_mqe_status, mb),
5139                         mb->un.mb_words[0], mb->un.mb_words[1],
5140                         mb->un.mb_words[2], mb->un.mb_words[3],
5141                         mb->un.mb_words[4], mb->un.mb_words[5],
5142                         mb->un.mb_words[6], mb->un.mb_words[7],
5143                         mb->un.mb_words[8], mb->un.mb_words[9],
5144                         mb->un.mb_words[10], mb->un.mb_words[11],
5145                         mb->un.mb_words[12], mboxq->mcqe.word0,
5146                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
5147                         mboxq->mcqe.trailer);
5148 exit:
5149         /* We are holding the token, no need for lock when releasing it */
5150         spin_lock_irqsave(&phba->hbalock, iflag);
5151         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5152         phba->sli.mbox_active = NULL;
5153         spin_unlock_irqrestore(&phba->hbalock, iflag);
5154         return rc;
5155 }
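
/*
 * A minimal standalone sketch of the doorbell-ready polling pattern used
 * above, assuming only an ioremapped BMBX register and a timeout in
 * milliseconds; tmo_ms is an illustrative parameter name, the rest mirrors
 * the loop in lpfc_sli4_post_sync_mbox():
 *
 *	unsigned long timeout = jiffies + msecs_to_jiffies(tmo_ms);
 *	struct lpfc_register reg;
 *	uint32_t ready;
 *
 *	do {
 *		reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
 *		ready = bf_get(lpfc_bmbx_rdy, &reg);
 *		if (!ready)
 *			msleep(2);
 *		if (time_after(jiffies, timeout))
 *			return MBXERR_ERROR;
 *	} while (!ready);
 *
 * The loop runs twice in the function above because the port latches the
 * 64-bit bootstrap mailbox DMA address from two 32-bit doorbell writes
 * (high half first), and each write must be acknowledged before the next.
 */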
5156
5157 /**
5158  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
5159  * @phba: Pointer to HBA context object.
5160  * @mboxq: Pointer to mailbox object.
5161  * @flag: Flag indicating how the mailbox needs to be processed.
5162  *
5163  * This function is called by discovery code and HBA management code to submit
5164  * a mailbox command to firmware with SLI-4 interface spec.
5165  *
5166  * The caller owns the mailbox command after the function returns.
5168  **/
5169 static int
5170 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5171                        uint32_t flag)
5172 {
5173         struct lpfc_sli *psli = &phba->sli;
5174         unsigned long iflags;
5175         int rc;
5176
5177         rc = lpfc_mbox_dev_check(phba);
5178         if (unlikely(rc)) {
5179                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5180                                 "(%d):2544 Mailbox command x%x (x%x) "
5181                                 "cannot issue Data: x%x x%x\n",
5182                                 mboxq->vport ? mboxq->vport->vpi : 0,
5183                                 mboxq->u.mb.mbxCommand,
5184                                 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5185                                 psli->sli_flag, flag);
5186                 goto out_not_finished;
5187         }
5188
5189         /* Detect polling mode and jump to a handler */
5190         if (!phba->sli4_hba.intr_enable) {
5191                 if (flag == MBX_POLL)
5192                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5193                 else
5194                         rc = -EIO;
5195                 if (rc != MBX_SUCCESS)
5196                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5197                                         "(%d):2541 Mailbox command x%x "
5198                                         "(x%x) cannot issue Data: x%x x%x\n",
5199                                         mboxq->vport ? mboxq->vport->vpi : 0,
5200                                         mboxq->u.mb.mbxCommand,
5201                                         lpfc_sli4_mbox_opcode_get(phba, mboxq),
5202                                         psli->sli_flag, flag);
5203                 return rc;
5204         } else if (flag == MBX_POLL) {
5205                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5206                                 "(%d):2542 Try to issue mailbox command "
5207                                 "x%x (x%x) synchronously ahead of async "
5208                                 "mailbox command queue: x%x x%x\n",
5209                                 mboxq->vport ? mboxq->vport->vpi : 0,
5210                                 mboxq->u.mb.mbxCommand,
5211                                 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5212                                 psli->sli_flag, flag);
5213                 /* Try to block the asynchronous mailbox posting */
5214                 rc = lpfc_sli4_async_mbox_block(phba);
5215                 if (!rc) {
5216                         /* Successfully blocked, now issue sync mbox cmd */
5217                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5218                         if (rc != MBX_SUCCESS)
5219                                 lpfc_printf_log(phba, KERN_ERR,
5220                                                 LOG_MBOX | LOG_SLI,
5221                                                 "(%d):2597 Mailbox command "
5222                                                 "x%x (x%x) cannot issue "
5223                                                 "Data: x%x x%x\n",
5224                                                 mboxq->vport ?
5225                                                 mboxq->vport->vpi : 0,
5226                                                 mboxq->u.mb.mbxCommand,
5227                                                 lpfc_sli4_mbox_opcode_get(phba,
5228                                                                 mboxq),
5229                                                 psli->sli_flag, flag);
5230                         /* Unblock the async mailbox posting afterward */
5231                         lpfc_sli4_async_mbox_unblock(phba);
5232                 }
5233                 return rc;
5234         }
5235
5236         /* Now, interrupt mode asynchronous mailbox command */
5237         rc = lpfc_mbox_cmd_check(phba, mboxq);
5238         if (rc) {
5239                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5240                                 "(%d):2543 Mailbox command x%x (x%x) "
5241                                 "cannot issue Data: x%x x%x\n",
5242                                 mboxq->vport ? mboxq->vport->vpi : 0,
5243                                 mboxq->u.mb.mbxCommand,
5244                                 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5245                                 psli->sli_flag, flag);
5246                 goto out_not_finished;
5247         }
5248
5249         /* Put the mailbox command to the driver internal FIFO */
5250         psli->slistat.mbox_busy++;
5251         spin_lock_irqsave(&phba->hbalock, iflags);
5252         lpfc_mbox_put(phba, mboxq);
5253         spin_unlock_irqrestore(&phba->hbalock, iflags);
5254         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5255                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
5256                         "x%x (x%x) x%x x%x x%x\n",
5257                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
5258                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5259                         lpfc_sli4_mbox_opcode_get(phba, mboxq),
5260                         phba->pport->port_state,
5261                         psli->sli_flag, MBX_NOWAIT);
5262         /* Wake up worker thread to transport mailbox command from head of queue */
5263         lpfc_worker_wake_up(phba);
5264
5265         return MBX_BUSY;
5266
5267 out_not_finished:
5268         return MBX_NOT_FINISHED;
5269 }
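
/*
 * Hedged usage sketch for the polling path above. The allocation and
 * ownership handling follow the contract documented in the kernel-doc;
 * the READ_REV preparer is just one example of a mailbox setup helper.
 *
 *	LPFC_MBOXQ_t *mboxq;
 *	int rc;
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		... log and recover ...
 *	mempool_free(mboxq, phba->mbox_mem_pool);
 *
 * With MBX_NOWAIT the command is instead queued and MBX_BUSY is returned;
 * the completion handler then runs when the port posts the response.
 */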
5270
5271 /**
5272  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
5273  * @phba: Pointer to HBA context object.
5274  *
5275  * This function is called by worker thread to send a mailbox command to
5276  * SLI4 HBA firmware.
5277  *
5278  **/
5279 int
5280 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
5281 {
5282         struct lpfc_sli *psli = &phba->sli;
5283         LPFC_MBOXQ_t *mboxq;
5284         int rc = MBX_SUCCESS;
5285         unsigned long iflags;
5286         struct lpfc_mqe *mqe;
5287         uint32_t mbx_cmnd;
5288
5289         /* Check interrupt mode before posting async mailbox command */
5290         if (unlikely(!phba->sli4_hba.intr_enable))
5291                 return MBX_NOT_FINISHED;
5292
5293         /* Check for mailbox command service token */
5294         spin_lock_irqsave(&phba->hbalock, iflags);
5295         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5296                 spin_unlock_irqrestore(&phba->hbalock, iflags);
5297                 return MBX_NOT_FINISHED;
5298         }
5299         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5300                 spin_unlock_irqrestore(&phba->hbalock, iflags);
5301                 return MBX_NOT_FINISHED;
5302         }
5303         if (unlikely(phba->sli.mbox_active)) {
5304                 spin_unlock_irqrestore(&phba->hbalock, iflags);
5305                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5306                                 "0384 There is a pending active mailbox cmd\n");
5307                 return MBX_NOT_FINISHED;
5308         }
5309         /* Take the mailbox command service token */
5310         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5311
5312         /* Get the next mailbox command from head of queue */
5313         mboxq = lpfc_mbox_get(phba);
5314
5315         /* If no mailbox command is waiting to be posted, we're done */
5316         if (!mboxq) {
5317                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5318                 spin_unlock_irqrestore(&phba->hbalock, iflags);
5319                 return MBX_SUCCESS;
5320         }
5321         phba->sli.mbox_active = mboxq;
5322         spin_unlock_irqrestore(&phba->hbalock, iflags);
5323
5324         /* Check device readiness for posting mailbox command */
5325         rc = lpfc_mbox_dev_check(phba);
5326         if (unlikely(rc))
5327                 /* Driver clean routine will clean up pending mailbox */
5328                 goto out_not_finished;
5329
5330         /* Prepare the mbox command to be posted */
5331         mqe = &mboxq->u.mqe;
5332         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
5333
5334         /* Start timer for the mbox_tmo and log some mailbox post messages */
5335         mod_timer(&psli->mbox_tmo, (jiffies +
5336                   (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
5337
5338         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5339                         "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
5340                         "x%x x%x\n",
5341                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
5342                         lpfc_sli4_mbox_opcode_get(phba, mboxq),
5343                         phba->pport->port_state, psli->sli_flag);
5344
5345         if (mbx_cmnd != MBX_HEARTBEAT) {
5346                 if (mboxq->vport) {
5347                         lpfc_debugfs_disc_trc(mboxq->vport,
5348                                 LPFC_DISC_TRC_MBOX_VPORT,
5349                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5350                                 mbx_cmnd, mqe->un.mb_words[0],
5351                                 mqe->un.mb_words[1]);
5352                 } else {
5353                         lpfc_debugfs_disc_trc(phba->pport,
5354                                 LPFC_DISC_TRC_MBOX,
5355                                 "MBOX Send: cmd:x%x mb:x%x x%x",
5356                                 mbx_cmnd, mqe->un.mb_words[0],
5357                                 mqe->un.mb_words[1]);
5358                 }
5359         }
5360         psli->slistat.mbox_cmd++;
5361
5362         /* Post the mailbox command to the port */
5363         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
5364         if (rc != MBX_SUCCESS) {
5365                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5366                                 "(%d):2533 Mailbox command x%x (x%x) "
5367                                 "cannot issue Data: x%x x%x\n",
5368                                 mboxq->vport ? mboxq->vport->vpi : 0,
5369                                 mboxq->u.mb.mbxCommand,
5370                                 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5371                                 psli->sli_flag, MBX_NOWAIT);
5372                 goto out_not_finished;
5373         }
5374
5375         return rc;
5376
5377 out_not_finished:
5378         spin_lock_irqsave(&phba->hbalock, iflags);
5379         mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
5380         __lpfc_mbox_cmpl_put(phba, mboxq);
5381         /* Release the token */
5382         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5383         phba->sli.mbox_active = NULL;
5384         spin_unlock_irqrestore(&phba->hbalock, iflags);
5385
5386         return MBX_NOT_FINISHED;
5387 }
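
/*
 * The LPFC_SLI_MBOX_ACTIVE "service token" handling above follows a common
 * pattern: test-and-set a flag under the lock, do the slow work unlocked,
 * then clear the flag under the lock again. A generic sketch with
 * hypothetical names:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	if (state & TOKEN_HELD) {
 *		spin_unlock_irqrestore(&lock, flags);
 *		return -EBUSY;
 *	}
 *	state |= TOKEN_HELD;
 *	spin_unlock_irqrestore(&lock, flags);
 *
 *	do_slow_work();			(no lock held here)
 *
 *	spin_lock_irqsave(&lock, flags);
 *	state &= ~TOKEN_HELD;
 *	spin_unlock_irqrestore(&lock, flags);
 */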
5388
5389 /**
5390  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
5391  * @phba: Pointer to HBA context object.
5392  * @pmbox: Pointer to mailbox object.
5393  * @flag: Flag indicating how the mailbox needs to be processed.
5394  *
5395  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
5396  * the API jump table function pointer from the lpfc_hba struct.
5397  *
5398  * The caller owns the mailbox command after the function returns.
5400  **/
5401 int
5402 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5403 {
5404         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
5405 }
5406
5407 /**
5408  * lpfc_mbox_api_table_setup - Set up mbox API function jump table
5409  * @phba: The hba struct for which this call is being executed.
5410  * @dev_grp: The HBA PCI-Device group number.
5411  *
5412  * This routine sets up the mbox interface API function jump table in @phba
5413  * struct.
5414  * Returns: 0 - success, -ENODEV - failure.
5415  **/
5416 int
5417 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5418 {
5419
5420         switch (dev_grp) {
5421         case LPFC_PCI_DEV_LP:
5422                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5423                 phba->lpfc_sli_handle_slow_ring_event =
5424                                 lpfc_sli_handle_slow_ring_event_s3;
5425                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5426                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5427                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5428                 break;
5429         case LPFC_PCI_DEV_OC:
5430                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5431                 phba->lpfc_sli_handle_slow_ring_event =
5432                                 lpfc_sli_handle_slow_ring_event_s4;
5433                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5434                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5435                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5436                 break;
5437         default:
5438                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5439                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
5440                                 dev_grp);
5441                 return -ENODEV;
5443         }
5444         return 0;
5445 }
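
/*
 * Condensed sketch of how the jump table above is wired and then used.
 * The probe-time call order shown is an assumption for illustration; the
 * indirect call matches lpfc_sli_issue_mbox() below.
 *
 *	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *	...
 *	rc = phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
 *		(dispatches to the _s3 or _s4 variant set above)
 */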
5446
5447 /**
5448  * __lpfc_sli_ringtx_put - Add an iocb to the txq
5449  * @phba: Pointer to HBA context object.
5450  * @pring: Pointer to driver SLI ring object.
5451  * @piocb: Pointer to the command iocb to add to the txq.
5452  *
5453  * This function is called with hbalock held to add a command
5454  * iocb to the txq when the SLI layer cannot submit the command iocb
5455  * to the ring.
5456  **/
5457 static void
5458 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5459                     struct lpfc_iocbq *piocb)
5460 {
5461         /* Insert the caller's iocb in the txq tail for later processing. */
5462         list_add_tail(&piocb->list, &pring->txq);
5463         pring->txq_cnt++;
5464 }
5465
5466 /**
5467  * lpfc_sli_next_iocb - Get the next iocb in the txq
5468  * @phba: Pointer to HBA context object.
5469  * @pring: Pointer to driver SLI ring object.
5470  * @piocb: Pointer to address of newly added command iocb.
5471  *
5472  * This function is called with hbalock held before a new
5473  * iocb is submitted to the firmware. This function checks
5474  * the txq to flush the iocbs in the txq to the firmware before
5475  * submitting new iocbs to the firmware.
5476  * If there are iocbs in the txq which need to be submitted
5477  * to firmware, lpfc_sli_next_iocb returns the first element
5478  * of the txq after dequeuing it from txq.
5479  * If there is no iocb in the txq then the function returns
5480  * *piocb and sets *piocb to NULL. The caller needs to check
5481  * *piocb to find out if there are more commands in the txq.
5482  **/
5483 static struct lpfc_iocbq *
5484 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5485                    struct lpfc_iocbq **piocb)
5486 {
5487         struct lpfc_iocbq *nextiocb;
5488
5489         nextiocb = lpfc_sli_ringtx_get(phba, pring);
5490         if (!nextiocb) {
5491                 nextiocb = *piocb;
5492                 *piocb = NULL;
5493         }
5494
5495         return nextiocb;
5496 }
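
/*
 * Sketch of the submit loop this helper enables (the real loop appears in
 * __lpfc_sli_issue_iocb_s3 below): drain the txq first, then the caller's
 * iocb, stopping as soon as the ring has no free slot.
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 *
 * After the loop, piocb == NULL means the caller's iocb was consumed;
 * otherwise the ring filled and piocb must be queued to the txq or
 * returned busy.
 */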
5497
5498 /**
5499  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
5500  * @phba: Pointer to HBA context object.
5501  * @ring_number: SLI ring number to issue iocb on.
5502  * @piocb: Pointer to command iocb.
5503  * @flag: Flag indicating if this command can be put into txq.
5504  *
5505  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
5506  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
5507  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
5508  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
5509  * this function allows only iocbs for posting buffers. This function finds
5510  * next available slot in the command ring and posts the command to the
5511  * available slot and writes the port attention register to request HBA start
5512  * processing new iocb. If there is no slot available in the ring and
5513  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
5514  * the function returns IOCB_BUSY.
5515  *
5516  * This function is called with hbalock held. The function returns success
5517  * after it successfully submits the iocb to the firmware or after adding it
5518  * to the txq.
5519  **/
5520 static int
5521 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
5522                     struct lpfc_iocbq *piocb, uint32_t flag)
5523 {
5524         struct lpfc_iocbq *nextiocb;
5525         IOCB_t *iocb;
5526         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
5527
5528         if (piocb->iocb_cmpl && (!piocb->vport) &&
5529            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
5530            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
5531                 lpfc_printf_log(phba, KERN_ERR,
5532                                 LOG_SLI | LOG_VPORT,
5533                                 "1807 IOCB x%x failed. No vport\n",
5534                                 piocb->iocb.ulpCommand);
5535                 dump_stack();
5536                 return IOCB_ERROR;
5537         }
5538
5540         /* If the PCI channel is in offline state, do not post iocbs. */
5541         if (unlikely(pci_channel_offline(phba->pcidev)))
5542                 return IOCB_ERROR;
5543
5544         /* If HBA has a deferred error attention, fail the iocb. */
5545         if (unlikely(phba->hba_flag & DEFER_ERATT))
5546                 return IOCB_ERROR;
5547
5548         /*
5549          * We should never get an IOCB if we are in a < LINK_DOWN state
5550          */
5551         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5552                 return IOCB_ERROR;
5553
5554         /*
5555          * Check to see if we are blocking IOCB processing because of an
5556          * outstanding event.
5557          */
5558         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
5559                 goto iocb_busy;
5560
5561         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
5562                 /*
5563                  * Only CREATE_XRI, CLOSE_XRI, QUE_RING_BUF and Menlo
5564                  * GEN_REQUEST64 iocbs can be issued when the link is down.
5565                  */
5566                 switch (piocb->iocb.ulpCommand) {
5567                 case CMD_GEN_REQUEST64_CR:
5568                 case CMD_GEN_REQUEST64_CX:
5569                         if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
5570                                 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
5571                                         FC_RCTL_DD_UNSOL_CMD) ||
5572                                 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
5573                                         MENLO_TRANSPORT_TYPE))
5575                                 goto iocb_busy;
5576                         break;
5577                 case CMD_QUE_RING_BUF_CN:
5578                 case CMD_QUE_RING_BUF64_CN:
5579                         /*
5580                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
5581                          * completion, iocb_cmpl MUST be 0.
5582                          */
5583                         if (piocb->iocb_cmpl)
5584                                 piocb->iocb_cmpl = NULL;
5585                         /*FALLTHROUGH*/
5586                 case CMD_CREATE_XRI_CR:
5587                 case CMD_CLOSE_XRI_CN:
5588                 case CMD_CLOSE_XRI_CX:
5589                         break;
5590                 default:
5591                         goto iocb_busy;
5592                 }
5593
5594         /*
5595          * For FCP commands, we must be in a state where we can process link
5596          * attention events.
5597          */
5598         } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
5599                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
5600                 goto iocb_busy;
5601         }
5602
5603         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
5604                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
5605                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
5606
5607         if (iocb)
5608                 lpfc_sli_update_ring(phba, pring);
5609         else
5610                 lpfc_sli_update_full_ring(phba, pring);
5611
5612         if (!piocb)
5613                 return IOCB_SUCCESS;
5614
5615         goto out_busy;
5616
5617  iocb_busy:
5618         pring->stats.iocb_cmd_delay++;
5619
5620  out_busy:
5621
5622         if (!(flag & SLI_IOCB_RET_IOCB)) {
5623                 __lpfc_sli_ringtx_put(phba, pring, piocb);
5624                 return IOCB_SUCCESS;
5625         }
5626
5627         return IOCB_BUSY;
5628 }
5629
5630 /**
5631  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
5632  * @phba: Pointer to HBA context object.
5633  * @piocbq: Pointer to command iocb.
5634  * @sglq: Pointer to the scatter gather queue object.
5635  *
5636  * This routine converts the bpl or bde that is in the IOCB
5637  * to a sgl list for the sli4 hardware. The physical address
5638  * of the bpl/bde is converted back to a virtual address.
5639  * If the IOCB contains a BPL then the list of BDE's is
5640  * converted to sli4_sge's. If the IOCB contains a single
5641  * BDE then it is converted to a single sli_sge.
5642  * The IOCB is still in CPU endianness so the contents of
5643  * the bpl can be used without byte swapping.
5644  *
5645  * Returns valid XRI = Success, NO_XRI = Failure.
5646  **/
5647 static uint16_t
5648 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5649                 struct lpfc_sglq *sglq)
5650 {
5651         uint16_t xritag = NO_XRI;
5652         struct ulp_bde64 *bpl = NULL;
5653         struct ulp_bde64 bde;
5654         struct sli4_sge *sgl  = NULL;
5655         IOCB_t *icmd;
5656         int numBdes = 0;
5657         int i = 0;
5658
5659         if (!piocbq || !sglq)
5660                 return xritag;
5661
5662         sgl  = (struct sli4_sge *)sglq->sgl;
5663         icmd = &piocbq->iocb;
5664         if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5665                 numBdes = icmd->un.genreq64.bdl.bdeSize /
5666                                 sizeof(struct ulp_bde64);
5667                 /* The addrHigh and addrLow fields within the IOCB
5668                  * have not been byteswapped yet so there is no
5669                  * need to swap them back.
5670                  */
5671                 bpl  = (struct ulp_bde64 *)
5672                         ((struct lpfc_dmabuf *)piocbq->context3)->virt;
5673
5674                 if (!bpl)
5675                         return xritag;
5676
5677                 for (i = 0; i < numBdes; i++) {
5678                         /* Should already be byte swapped. */
5679                         sgl->addr_hi =  bpl->addrHigh;
5680                         sgl->addr_lo =  bpl->addrLow;
5681                         /* swap the size field back to the cpu so we
5682                          * can assign it to the sgl.
5683                          */
5684                         bde.tus.w  = le32_to_cpu(bpl->tus.w);
5685                         bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5686                         if ((i+1) == numBdes)
5687                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
5688                         else
5689                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
5690                         sgl->word2 = cpu_to_le32(sgl->word2);
5691                         sgl->word3 = cpu_to_le32(sgl->word3);
5692                         bpl++;
5693                         sgl++;
5694                 }
5695         } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
5696                         /* The addrHigh and addrLow fields of the BDE have not
5697                          * been byteswapped yet so they need to be swapped
5698                          * before putting them in the sgl.
5699                          */
5700                         sgl->addr_hi =
5701                                 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5702                         sgl->addr_lo =
5703                                 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5704                         bf_set(lpfc_sli4_sge_len, sgl,
5705                                 icmd->un.genreq64.bdl.bdeSize);
5706                         bf_set(lpfc_sli4_sge_last, sgl, 1);
5707                         sgl->word2 = cpu_to_le32(sgl->word2);
5708                         sgl->word3 = cpu_to_le32(sgl->word3);
5709         }
5710         return sglq->sli4_xritag;
5711 }
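
/*
 * The per-entry conversion rule used above, restated as a sketch. Address
 * words in the BPL are already little-endian and copy straight across; the
 * size/flags word must round-trip through CPU order so the length and
 * "last" bit can be set with bf_set() before converting back:
 *
 *	sgl->addr_hi = bpl->addrHigh;             (already LE)
 *	sgl->addr_lo = bpl->addrLow;              (already LE)
 *	bde.tus.w = le32_to_cpu(bpl->tus.w);      (to CPU order)
 *	bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
 *	bf_set(lpfc_sli4_sge_last, sgl, last);    (last = (i + 1 == numBdes))
 *	sgl->word2 = cpu_to_le32(sgl->word2);     (back to LE)
 *	sgl->word3 = cpu_to_le32(sgl->word3);
 */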
5712
5713 /**
5714  * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5715  * @phba: Pointer to HBA context object.
5716  *
5717  * This routine performs round-robin distribution of SCSI commands across
5718  * the SLI4 FCP work queue indexes. It is called by __lpfc_sli_issue_iocb_s4()
5719  * with the hbalock held.
5720  *
5721  * Return: index into SLI4 fast-path FCP queue index.
5722  **/
5723 static uint32_t
5724 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
5725 {
5726         ++phba->fcp_qidx;
5727         if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
5728                 phba->fcp_qidx = 0;
5729
5730         return phba->fcp_qidx;
5731 }
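
/*
 * The round robin above is equivalent to a modulo increment, shown here
 * only to make the wrap-around explicit; the open-coded compare avoids
 * the division:
 *
 *	phba->fcp_qidx = (phba->fcp_qidx + 1) % phba->cfg_fcp_wq_count;
 *	return phba->fcp_qidx;
 */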
5732
5733 /**
5734  * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
5735  * @phba: Pointer to HBA context object.
5736  * @iocbq: Pointer to command iocb.
5737  * @wqe: Pointer to the work queue entry.
5738  *
5739  * This routine converts the iocb command to its Work Queue Entry
5740  * equivalent. The wqe pointer should not have any fields set when
5741  * this routine is called because it will memcpy over them.
5742  * This routine does not set the CQ_ID or the WQEC bits in the
5743  * wqe.
5744  *
5745  * Returns: 0 = Success, IOCB_ERROR = Failure.
5746  **/
5747 static int
5748 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5749                 union lpfc_wqe *wqe)
5750 {
5751         uint32_t payload_len = 0;
5752         uint8_t ct = 0;
5753         uint32_t fip;
5754         uint32_t abort_tag;
5755         uint8_t command_type = ELS_COMMAND_NON_FIP;
5756         uint8_t cmnd;
5757         uint16_t xritag;
5758         struct ulp_bde64 *bpl = NULL;
5759         uint32_t els_id = ELS_ID_DEFAULT;
5760
5761         fip = phba->hba_flag & HBA_FIP_SUPPORT;
5762         /* The fcp commands will set command type */
5763         if (iocbq->iocb_flag &  LPFC_IO_FCP)
5764                 command_type = FCP_COMMAND;
5765         else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
5766                 command_type = ELS_COMMAND_FIP;
5767         else
5768                 command_type = ELS_COMMAND_NON_FIP;
5769
5770         /* Some of the fields are in the right position already */
5771         memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5772         abort_tag = (uint32_t) iocbq->iotag;
5773         xritag = iocbq->sli4_xritag;
5774         wqe->words[7] = 0; /* The ct field has moved so reset */
5775         /* words0-2 bpl convert bde */
5776         if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5777                 bpl  = (struct ulp_bde64 *)
5778                         ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5779                 if (!bpl)
5780                         return IOCB_ERROR;
5781
5782                 /* Should already be byte swapped. */
5783                 wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
5784                 wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
5785                 /* swap the size field back to the cpu so we
5786                  * can assign it to the sgl.
5787                  */
5788                 wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
5789                 payload_len = wqe->generic.bde.tus.f.bdeSize;
5790         } else
5791                 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5792
5793         iocbq->iocb.ulpIoTag = iocbq->iotag;
5794         cmnd = iocbq->iocb.ulpCommand;
5795
5796         switch (iocbq->iocb.ulpCommand) {
5797         case CMD_ELS_REQUEST64_CR:
5798                 if (!iocbq->iocb.ulpLe) {
5799                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5800                                 "2007 Only Limited Edition cmd Format"
5801                                 " supported 0x%x\n",
5802                                 iocbq->iocb.ulpCommand);
5803                         return IOCB_ERROR;
5804                 }
5805                 wqe->els_req.payload_len = payload_len;
5806                 /* Els_request64 has a TMO */
5807                 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5808                         iocbq->iocb.ulpTimeout);
5809                 /* Need a VF for word 4 set the vf bit*/
5810                 bf_set(els_req64_vf, &wqe->els_req, 0);
5811                 /* And a VFID for word 12 */
5812                 bf_set(els_req64_vfid, &wqe->els_req, 0);
5813                 /*
5814                  * Set ct field to 3, indicates that the context_tag field
5815                  * contains the FCFI and remote N_Port_ID is
5816                  * in word 5.
5817                  */
5818
5819                 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5820                 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5821                                 iocbq->iocb.ulpContext);
5822
5823                 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5824                 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5825                 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5826
5827                 if (command_type == ELS_COMMAND_FIP) {
5828                         els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
5829                                         >> LPFC_FIP_ELS_ID_SHIFT);
5830                 }
5831                 bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
5832
5833         break;
5834         case CMD_XMIT_SEQUENCE64_CR:
5835                 /* word3 iocb=io_tag32 wqe=payload_offset */
5836                 /* payload offset used for multiple outstanding
5837                  * sequences on the same exchange
5838                  */
5839                 wqe->words[3] = 0;
5840                 /* word4 relative_offset memcpy */
5841                 /* word5 r_ctl/df_ctl memcpy */
5842                 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5843                 wqe->xmit_sequence.xmit_len = payload_len;
5844         break;
5845         case CMD_XMIT_BCAST64_CN:
5846                 /* word3 iocb=iotag32 wqe=payload_len */
5847                 wqe->words[3] = 0; /* no definition for this in wqe */
5848                 /* word4 iocb=rsvd wqe=rsvd */
5849                 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
5850                 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
5851                 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5852                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5853         break;
5854         case CMD_FCP_IWRITE64_CR:
5855                 command_type = FCP_COMMAND_DATA_OUT;
5856                 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
5857                  * confusing.
5858                  * word3 is payload_len: byte offset to the sgl entry for the
5859                  * fcp_command.
5860                  * word4 is total xfer len, same as the IOCB->ulpParameter.
5861                  * word5 is initial xfer len 0 = wait for xfer-ready
5862                  */
5863
5864                 /* Always wait for xfer-ready before sending data */
5865                 wqe->fcp_iwrite.initial_xfer_len = 0;
5866                 /* word 4 (xfer length) should have been set on the memcpy */
5867
5868         /* allow write to fall through to read */
5869         case CMD_FCP_IREAD64_CR:
5870                 /* FCP_CMD is always the 1st sgl entry */
5871                 wqe->fcp_iread.payload_len =
5872                         payload_len + sizeof(struct fcp_rsp);
5873
5874                 /* word 4 (xfer length) should have been set on the memcpy */
5875
5876                 bf_set(lpfc_wqe_gen_erp, &wqe->generic,
5877                         iocbq->iocb.ulpFCP2Rcvy);
5878                 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
5879                 /* The XC bit and the XS bit are similar. The driver never
5880                  * tracked whether or not the exchange was previously open.
5881                  * XC = Exchange create, 0 is create. 1 is already open.
5882                  * XS = link cmd: 1 do not close the exchange after command.
5883                  * XS = 0 close exchange when command completes.
5884                  * The only time we would not set the XC bit is when the XS bit
5885                  * is set and we are sending our 2nd or greater command on
5886                  * this exchange.
5887                  */
5888                 /* Always open the exchange */
5889                 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5890
5891                 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5892                 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5893                 break;
5894         case CMD_FCP_ICMND64_CR:
5895                 /* Always open the exchange */
5896                 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5897
5898                 wqe->words[4] = 0;
5899                 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5900                 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5901         break;
5902         case CMD_GEN_REQUEST64_CR:
5903                 /* word3 command length is described as byte offset to the
5904                  * rsp_data. Would always be 16, sizeof(struct sli4_sge)
5905                  * sgl[0] = cmnd
5906                  * sgl[1] = rsp.
5907                  *
5908                  */
5909                 wqe->gen_req.command_len = payload_len;
5910                 /* Word4 parameter  copied in the memcpy */
5911                 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
5912                 /* word6 context tag copied in memcpy */
5913                 if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
5914                         ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5915                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5916                                 "2015 Invalid CT %x command 0x%x\n",
5917                                 ct, iocbq->iocb.ulpCommand);
5918                         return IOCB_ERROR;
5919                 }
5920                 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
5921                 bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
5922                         iocbq->iocb.ulpTimeout);
5923
5924                 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5925                 command_type = OTHER_COMMAND;
5926         break;
5927         case CMD_XMIT_ELS_RSP64_CX:
5928                 /* words0-2 BDE memcpy */
5929                 /* word3 iocb=iotag32 wqe=rsvd */
5930                 wqe->words[3] = 0;
5931                 /* word4 iocb=did wqe=rsvd. */
5932                 wqe->words[4] = 0;
5933                 /* word5 iocb=rsvd wqe=did */
5934                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
5935                          iocbq->iocb.un.elsreq64.remoteID);
5936
5937                 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5938                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5939
5940                 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5941                 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5942                 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
5943                         bf_set(lpfc_wqe_gen_context, &wqe->generic,
5944                                iocbq->vport->vpi + phba->vpi_base);
5945                 command_type = OTHER_COMMAND;
5946         break;
5947         case CMD_CLOSE_XRI_CN:
5948         case CMD_ABORT_XRI_CN:
5949         case CMD_ABORT_XRI_CX:
5950                 /* words 0-2 are reserved; the memcpy should have left them 0 */
5951                 /* port will send abts */
5952                 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5953                         /*
5954                          * The link is down so the fw does not need to send abts
5955                          * on the wire.
5956                          */
5957                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
5958                 else
5959                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
5960                 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
5961                 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5962                 wqe->words[5] = 0;
5963                 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5964                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5966                 wqe->generic.abort_tag = abort_tag;
5967                 /*
5968                  * The abort handler will send us CMD_ABORT_XRI_CN or
5969                  * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
5970                  */
5971                 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
5972                 cmnd = CMD_ABORT_XRI_CX;
5973                 command_type = OTHER_COMMAND;
5974                 xritag = 0;
5975         break;
5976         case CMD_XMIT_BLS_RSP64_CX:
5977                 /* As BLS ABTS-ACC WQE is very different from other WQEs,
5978                  * we re-construct this WQE here based on information in
5979                  * iocbq from scratch.
5980                  */
5981                 memset(wqe, 0, sizeof(union lpfc_wqe));
5982                 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
5983                        iocbq->iocb.un.ulpWord[3]);
5984                 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
5985                        iocbq->sli4_xritag);
5986                 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
5987                 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
5988                 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
5989                        iocbq->iocb.ulpContext);
5990                 /* Overwrite the pre-set command type with OTHER_COMMAND */
5991                 command_type = OTHER_COMMAND;
5992         break;
5993         case CMD_XRI_ABORTED_CX:
5994         case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
5995                 /* words0-2 are all 0's no bde */
5996                 /* word3 and word4 are reserved */
5997                 wqe->words[3] = 0;
5998                 wqe->words[4] = 0;
5999                 /* word5 iocb=rsvd wqe=did */
6000                 /* There is no remote port id in the IOCB? */
6001                 /* Let this fall through and fail */
6002         case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
6003         case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
6004         case CMD_FCP_TRSP64_CX: /* Target mode rcv */
6005         case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
6006         default:
6007                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6008                                 "2014 Invalid command 0x%x\n",
6009                                 iocbq->iocb.ulpCommand);
6010                 return IOCB_ERROR;
6011         break;
6012
6013         }
6014         bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
6015         bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
6016         wqe->generic.abort_tag = abort_tag;
6017         bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
6018         bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
6019         bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
6020         bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
6021
6022         return 0;
6023 }
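
/*
 * The bf_set()/bf_get() accessors used throughout the conversion above
 * pack and extract named bitfields of a word. A simplified sketch of the
 * idiom (the real macro definitions live in the lpfc headers):
 *
 *	#define bf_set(name, ptr, value) \
 *		((ptr)->name##_WORD = ((((value) & name##_MASK) << \
 *			name##_SHIFT) | ((ptr)->name##_WORD & \
 *			~(name##_MASK << name##_SHIFT))))
 *
 * so bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag) clears the XRI field
 * of its word and ORs in the new tag without disturbing neighboring bits.
 */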
6024
6025 /**
6026  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
6027  * @phba: Pointer to HBA context object.
6028  * @ring_number: SLI ring number to issue iocb on.
6029  * @piocb: Pointer to command iocb.
6030  * @flag: Flag indicating if this command can be put into txq.
6031  *
6032  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
6033  * an iocb command to an HBA with SLI-4 interface spec.
6034  *
6035  * This function is called with hbalock held. The function returns success
6036  * after it successfully submits the iocb to the firmware or after adding it
6037  * to the txq.
6038  **/
6039 static int
6040 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6041                          struct lpfc_iocbq *piocb, uint32_t flag)
6042 {
6043         struct lpfc_sglq *sglq;
6044         uint16_t xritag;
6045         union lpfc_wqe wqe;
6046         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6047         uint32_t fcp_wqidx;
6048
6049         if (piocb->sli4_xritag == NO_XRI) {
6050                 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6051                     piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6052                         sglq = NULL;
6053                 else {
6054                         sglq = __lpfc_sli_get_sglq(phba);
6055                         if (!sglq)
6056                                 return IOCB_ERROR;
6057                         piocb->sli4_xritag = sglq->sli4_xritag;
6058                 }
6059         } else if (piocb->iocb_flag &  LPFC_IO_FCP) {
6060                 sglq = NULL; /* These IO's already have an XRI and
6061                               * a mapped sgl.
6062                               */
6063         } else {
6064                 /* This is a continuation of a command (CX), so this
6065                  * sglq is on the active list
6066                  */
6067                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6068                 if (!sglq)
6069                         return IOCB_ERROR;
6070         }
6071
6072         if (sglq) {
6073                 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
6074                 if (xritag != sglq->sli4_xritag)
6075                         return IOCB_ERROR;
6076         }
6077
6078         if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6079                 return IOCB_ERROR;
6080
6081         if (piocb->iocb_flag &  LPFC_IO_FCP) {
6082                 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
6083                 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
6084                         return IOCB_ERROR;
6085         } else {
6086                 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6087                         return IOCB_ERROR;
6088         }
6089         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6090
6091         return 0;
6092 }
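
/*
 * Decision table for the sglq selection above, summarized for reference
 * (a restatement of the code, not additional behavior):
 *
 *	sli4_xritag == NO_XRI, abort/close cmd -> sglq = NULL (no payload)
 *	sli4_xritag == NO_XRI, anything else   -> allocate a free sglq
 *	sli4_xritag set, LPFC_IO_FCP           -> sglq = NULL (pre-mapped sgl)
 *	sli4_xritag set, non-FCP continuation  -> look up the active sglq
 */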
6093
6094 /**
6095  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6096  *
6097  * This routine wraps the actual lockless IOCB issuing routine through the
6098  * function pointer in the lpfc_hba struct.
6099  *
6100  * Return codes:
6101  *      IOCB_ERROR - Error
6102  *      IOCB_SUCCESS - Success
6103  *      IOCB_BUSY - Busy
6104  **/
6105 static inline int
6106 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6107                 struct lpfc_iocbq *piocb, uint32_t flag)
6108 {
6109         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6110 }
6111
6112 /**
6113  * lpfc_sli_api_table_setup - Set up SLI API function jump table
6114  * @phba: The hba struct for which this call is being executed.
6115  * @dev_grp: The HBA PCI-Device group number.
6116  *
6117  * This routine sets up the SLI interface API function jump table in @phba
6118  * struct.
6119  * Returns: 0 - success, -ENODEV - failure.
6120  **/
6121 int
6122 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6123 {
6124
6125         switch (dev_grp) {
6126         case LPFC_PCI_DEV_LP:
6127                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6128                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6129                 break;
6130         case LPFC_PCI_DEV_OC:
6131                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6132                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6133                 break;
6134         default:
6135                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6136                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
6137                                 dev_grp);
6138                 return -ENODEV;
6140         }
6141         phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6142         return 0;
6143 }
6144
6145 /**
6146  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
6147  * @phba: Pointer to HBA context object.
6148  * @ring_number: SLI ring number to issue iocb on.
6149  * @piocb: Pointer to command iocb.
6150  * @flag: Flag indicating if this command can be put into txq.
6151  *
6152  * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
6153  * function. This function gets the hbalock and calls
6154  * __lpfc_sli_issue_iocb function and will return the error returned
6155  * by __lpfc_sli_issue_iocb function. This wrapper is used by
6156  * functions which do not hold hbalock.
6157  **/
6158 int
6159 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6160                     struct lpfc_iocbq *piocb, uint32_t flag)
6161 {
6162         unsigned long iflags;
6163         int rc;
6164
6165         spin_lock_irqsave(&phba->hbalock, iflags);
6166         rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6167         spin_unlock_irqrestore(&phba->hbalock, iflags);
6168
6169         return rc;
6170 }
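
/*
 * Hedged usage sketch for the locking wrapper above: allocate an iocb,
 * set a completion handler, and issue it on the ELS ring. The completion
 * handler name and the error policy are illustrative assumptions.
 *
 *	struct lpfc_iocbq *piocb = lpfc_sli_get_iocbq(phba);
 *
 *	if (!piocb)
 *		return -ENOMEM;
 *	... fill in piocb->iocb for the command ...
 *	piocb->vport = vport;
 *	piocb->iocb_cmpl = my_cmpl;
 *	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0) ==
 *	    IOCB_ERROR) {
 *		lpfc_sli_release_iocbq(phba, piocb);
 *		return -EIO;
 *	}
 */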
6171
6172 /**
6173  * lpfc_extra_ring_setup - Extra ring setup function
6174  * @phba: Pointer to HBA context object.
6175  *
6176  * This function is called while the driver attaches to the
6177  * HBA to set up the extra ring. The extra ring is used
6178  * only when the driver needs to support target mode or
6179  * IP over FC functionality.
6180  *
6181  * This function is called with no lock held.
6182  **/
6183 static int
6184 lpfc_extra_ring_setup(struct lpfc_hba *phba)
6185 {
6186         struct lpfc_sli *psli;
6187         struct lpfc_sli_ring *pring;
6188
6189         psli = &phba->sli;
6190
6191         /* Adjust cmd/rsp ring iocb entries more evenly */
6192
6193         /* Take some away from the FCP ring */
6194         pring = &psli->ring[psli->fcp_ring];
6195         pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
6196         pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
6197         pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
6198         pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
6199
6200         /* and give them to the extra ring */
6201         pring = &psli->ring[psli->extra_ring];
6202
6203         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
6204         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
6205         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
6206         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
6207
6208         /* Setup default profile for this ring */
6209         pring->iotag_max = 4096;
6210         pring->num_mask = 1;
6211         pring->prt[0].profile = 0;      /* Mask 0 */
6212         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
6213         pring->prt[0].type = phba->cfg_multi_ring_type;
6214         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
6215         return 0;
6216 }
6217
6218 /**
6219  * lpfc_sli_async_event_handler - ASYNC iocb handler function
6220  * @phba: Pointer to HBA context object.
6221  * @pring: Pointer to driver SLI ring object.
6222  * @iocbq: Pointer to iocb object.
6223  *
6224  * This function is called by the slow ring event handler
6225  * function when there is an ASYNC event iocb in the ring.
6226  * This function is called with no lock held.
6227  * Currently this function handles only temperature related
6228  * ASYNC events. The function decodes the temperature sensor
6229  * event message and posts events for the management applications.
6230  **/
6231 static void
6232 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
6233         struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
6234 {
6235         IOCB_t *icmd;
6236         uint16_t evt_code;
6237         uint16_t temp;
6238         struct temp_event temp_event_data;
6239         struct Scsi_Host *shost;
6240         uint32_t *iocb_w;
6241
6242         icmd = &iocbq->iocb;
6243         evt_code = icmd->un.asyncstat.evt_code;
6244         temp = icmd->ulpContext;
6245
6246         if ((evt_code != ASYNC_TEMP_WARN) &&
6247                 (evt_code != ASYNC_TEMP_SAFE)) {
6248                 iocb_w = (uint32_t *) icmd;
6249                 lpfc_printf_log(phba,
6250                         KERN_ERR,
6251                         LOG_SLI,
6252                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
6253                         " evt_code 0x%x\n"
6254                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
6255                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
6256                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
6257                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
6258                         pring->ringno,
6259                         icmd->un.asyncstat.evt_code,
6260                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
6261                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
6262                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
6263                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
6264
6265                 return;
6266         }
6267         temp_event_data.data = (uint32_t)temp;
6268         temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6269         if (evt_code == ASYNC_TEMP_WARN) {
6270                 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6271                 lpfc_printf_log(phba,
6272                                 KERN_ERR,
6273                                 LOG_TEMP,
6274                                 "0347 Adapter is very hot, please take "
6275                                 "corrective action. temperature : %d Celsius\n",
6276                                 temp);
6277         }
6278         if (evt_code == ASYNC_TEMP_SAFE) {
6279                 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6280                 lpfc_printf_log(phba,
6281                                 KERN_ERR,
6282                                 LOG_TEMP,
6283                                 "0340 Adapter temperature is OK now. "
6284                                 "temperature : %d Celsius\n",
6285                                 temp);
6286         }
6287
6288         /* Send temperature change event to applications */
6289         shost = lpfc_shost_from_vport(phba->pport);
6290         fc_host_post_vendor_event(shost, fc_get_event_number(),
6291                 sizeof(temp_event_data), (char *) &temp_event_data,
6292                 LPFC_NL_VENDOR_ID);
6293
6294 }
6295
6296
6297 /**
6298  * lpfc_sli_setup - SLI ring setup function
6299  * @phba: Pointer to HBA context object.
6300  *
6301  * lpfc_sli_setup sets up rings of the SLI interface with
6302  * number of iocbs per ring and iotags. This function is
6303  * called while the driver attaches to the HBA and before
6304  * interrupts are enabled, so there is no need for locking.
6305  *
6306  * This function always returns 0.
6307  **/
6308 int
6309 lpfc_sli_setup(struct lpfc_hba *phba)
6310 {
6311         int i, totiocbsize = 0;
6312         struct lpfc_sli *psli = &phba->sli;
6313         struct lpfc_sli_ring *pring;
6314
6315         psli->num_rings = MAX_CONFIGURED_RINGS;
6316         psli->sli_flag = 0;
6317         psli->fcp_ring = LPFC_FCP_RING;
6318         psli->next_ring = LPFC_FCP_NEXT_RING;
6319         psli->extra_ring = LPFC_EXTRA_RING;
6320
6321         psli->iocbq_lookup = NULL;
6322         psli->iocbq_lookup_len = 0;
6323         psli->last_iotag = 0;
6324
6325         for (i = 0; i < psli->num_rings; i++) {
6326                 pring = &psli->ring[i];
6327                 switch (i) {
6328                 case LPFC_FCP_RING:     /* ring 0 - FCP */
6329                         /* numCiocb and numRiocb are used in config_port */
6330                         pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
6331                         pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
6332                         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
6333                         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
6334                         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
6335                         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
6336                         pring->sizeCiocb = (phba->sli_rev == 3) ?
6337                                                         SLI3_IOCB_CMD_SIZE :
6338                                                         SLI2_IOCB_CMD_SIZE;
6339                         pring->sizeRiocb = (phba->sli_rev == 3) ?
6340                                                         SLI3_IOCB_RSP_SIZE :
6341                                                         SLI2_IOCB_RSP_SIZE;
6342                         pring->iotag_ctr = 0;
6343                         pring->iotag_max =
6344                             (phba->cfg_hba_queue_depth * 2);
6345                         pring->fast_iotag = pring->iotag_max;
6346                         pring->num_mask = 0;
6347                         break;
6348                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
6349                         /* numCiocb and numRiocb are used in config_port */
6350                         pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
6351                         pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
6352                         pring->sizeCiocb = (phba->sli_rev == 3) ?
6353                                                         SLI3_IOCB_CMD_SIZE :
6354                                                         SLI2_IOCB_CMD_SIZE;
6355                         pring->sizeRiocb = (phba->sli_rev == 3) ?
6356                                                         SLI3_IOCB_RSP_SIZE :
6357                                                         SLI2_IOCB_RSP_SIZE;
6358                         pring->iotag_max = phba->cfg_hba_queue_depth;
6359                         pring->num_mask = 0;
6360                         break;
6361                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
6362                         /* numCiocb and numRiocb are used in config_port */
6363                         pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
6364                         pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
6365                         pring->sizeCiocb = (phba->sli_rev == 3) ?
6366                                                         SLI3_IOCB_CMD_SIZE :
6367                                                         SLI2_IOCB_CMD_SIZE;
6368                         pring->sizeRiocb = (phba->sli_rev == 3) ?
6369                                                         SLI3_IOCB_RSP_SIZE :
6370                                                         SLI2_IOCB_RSP_SIZE;
6371                         pring->fast_iotag = 0;
6372                         pring->iotag_ctr = 0;
6373                         pring->iotag_max = 4096;
6374                         pring->lpfc_sli_rcv_async_status =
6375                                 lpfc_sli_async_event_handler;
6376                         pring->num_mask = LPFC_MAX_RING_MASK;
6377                         pring->prt[0].profile = 0;      /* Mask 0 */
6378                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
6379                         pring->prt[0].type = FC_TYPE_ELS;
6380                         pring->prt[0].lpfc_sli_rcv_unsol_event =
6381                             lpfc_els_unsol_event;
6382                         pring->prt[1].profile = 0;      /* Mask 1 */
6383                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
6384                         pring->prt[1].type = FC_TYPE_ELS;
6385                         pring->prt[1].lpfc_sli_rcv_unsol_event =
6386                             lpfc_els_unsol_event;
6387                         pring->prt[2].profile = 0;      /* Mask 2 */
6388                         /* NameServer Inquiry */
6389                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
6390                         /* NameServer */
6391                         pring->prt[2].type = FC_TYPE_CT;
6392                         pring->prt[2].lpfc_sli_rcv_unsol_event =
6393                             lpfc_ct_unsol_event;
6394                         pring->prt[3].profile = 0;      /* Mask 3 */
6395                         /* NameServer response */
6396                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
6397                         /* NameServer */
6398                         pring->prt[3].type = FC_TYPE_CT;
6399                         pring->prt[3].lpfc_sli_rcv_unsol_event =
6400                             lpfc_ct_unsol_event;
6401                         /* abort unsolicited sequence */
6402                         pring->prt[4].profile = 0;      /* Mask 4 */
6403                         pring->prt[4].rctl = FC_RCTL_BA_ABTS;
6404                         pring->prt[4].type = FC_TYPE_BLS;
6405                         pring->prt[4].lpfc_sli_rcv_unsol_event =
6406                             lpfc_sli4_ct_abort_unsol_event;
6407                         break;
6408                 }
6409                 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
6410                                 (pring->numRiocb * pring->sizeRiocb);
6411         }
6412         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
6413                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
6414                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
6415                        "SLI2 SLIM Data: x%x x%lx\n",
6416                        phba->brd_no, totiocbsize,
6417                        (unsigned long) MAX_SLIM_IOCB_SIZE);
6418         }
6419         if (phba->cfg_multi_ring_support == 2)
6420                 lpfc_extra_ring_setup(phba);
6421
6422         return 0;
6423 }
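
/*
 * Illustrative sketch (not part of the original source): the prt[]
 * rctl/type masks configured for the ELS ring above are consumed by the
 * unsolicited-receive path, which matches an incoming frame's R_CTL and
 * TYPE against each mask and dispatches the registered handler, roughly:
 *
 *	for (i = 0; i < pring->num_mask; i++) {
 *		if ((pring->prt[i].rctl == Rctl) &&
 *		    (pring->prt[i].type == Type)) {
 *			(pring->prt[i].lpfc_sli_rcv_unsol_event)
 *				(phba, pring, saveq);
 *			break;
 *		}
 *	}
 */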
6424
6425 /**
6426  * lpfc_sli_queue_setup - Queue initialization function
6427  * @phba: Pointer to HBA context object.
6428  *
6429  * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
6430  * ring. This function also initializes ring indices of each ring.
6431  * This function is called during the initialization of the SLI
6432  * interface of an HBA.
6433  * This function is called with no lock held and always returns
6434  * 1.
6435  **/
6436 int
6437 lpfc_sli_queue_setup(struct lpfc_hba *phba)
6438 {
6439         struct lpfc_sli *psli;
6440         struct lpfc_sli_ring *pring;
6441         int i;
6442
6443         psli = &phba->sli;
6444         spin_lock_irq(&phba->hbalock);
6445         INIT_LIST_HEAD(&psli->mboxq);
6446         INIT_LIST_HEAD(&psli->mboxq_cmpl);
6447         /* Initialize list headers for txq and txcmplq as doubly linked lists */
6448         for (i = 0; i < psli->num_rings; i++) {
6449                 pring = &psli->ring[i];
6450                 pring->ringno = i;
6451                 pring->next_cmdidx  = 0;
6452                 pring->local_getidx = 0;
6453                 pring->cmdidx = 0;
6454                 INIT_LIST_HEAD(&pring->txq);
6455                 INIT_LIST_HEAD(&pring->txcmplq);
6456                 INIT_LIST_HEAD(&pring->iocb_continueq);
6457                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
6458                 INIT_LIST_HEAD(&pring->postbufq);
6459         }
6460         spin_unlock_irq(&phba->hbalock);
6461         return 1;
6462 }
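
/*
 * Illustrative call order (sketch): during SLI3 HBA bring-up the driver
 * is expected to configure the rings first and then initialize their
 * queues, e.g.:
 *
 *	lpfc_sli_setup(phba);
 *	lpfc_sli_queue_setup(phba);
 */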
6463
6464 /**
6465  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
6466  * @phba: Pointer to HBA context object.
6467  *
6468  * This routine flushes the mailbox command sub-system. It will unconditionally
6469  * flush all the mailbox commands in the three possible stages in the mailbox
6470  * command sub-system: the pending mailbox command queue, the outstanding
6471  * mailbox command, and the completed mailbox command queue. It is the
6472  * caller's responsibility to make sure that the driver is in the proper
6473  * state to flush the mailbox command sub-system. Namely, the posting of
6474  * mailbox commands into the pending mailbox command queue from the various
6475  * clients must be stopped; either the HBA is in a state in which it will
6476  * never work on the outstanding mailbox command (such as in EEH or ERATT
6477  * conditions) or the outstanding mailbox command has been completed.
6478  **/
6479 static void
6480 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
6481 {
6482         LIST_HEAD(completions);
6483         struct lpfc_sli *psli = &phba->sli;
6484         LPFC_MBOXQ_t *pmb;
6485         unsigned long iflag;
6486
6487         /* Flush all the mailbox commands in the mbox system */
6488         spin_lock_irqsave(&phba->hbalock, iflag);
6489         /* The pending mailbox command queue */
6490         list_splice_init(&phba->sli.mboxq, &completions);
6491         /* The outstanding active mailbox command */
6492         if (psli->mbox_active) {
6493                 list_add_tail(&psli->mbox_active->list, &completions);
6494                 psli->mbox_active = NULL;
6495                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6496         }
6497         /* The completed mailbox command queue */
6498         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
6499         spin_unlock_irqrestore(&phba->hbalock, iflag);
6500
6501         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
6502         while (!list_empty(&completions)) {
6503                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
6504                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
6505                 if (pmb->mbox_cmpl)
6506                         pmb->mbox_cmpl(phba, pmb);
6507         }
6508 }
6509
6510 /**
6511  * lpfc_sli_host_down - Vport cleanup function
6512  * @vport: Pointer to virtual port object.
6513  *
6514  * lpfc_sli_host_down is called to clean up the resources
6515  * associated with a vport before destroying virtual
6516  * port data structures.
6517  * This function performs the following operations:
6518  * - Free discovery resources associated with this virtual
6519  *   port.
6520  * - Free iocbs associated with this virtual port in
6521  *   the txq.
6522  * - Send abort for all iocb commands associated with this
6523  *   vport in txcmplq.
6524  *
6525  * This function is called with no lock held and always returns 1.
6526  **/
6527 int
6528 lpfc_sli_host_down(struct lpfc_vport *vport)
6529 {
6530         LIST_HEAD(completions);
6531         struct lpfc_hba *phba = vport->phba;
6532         struct lpfc_sli *psli = &phba->sli;
6533         struct lpfc_sli_ring *pring;
6534         struct lpfc_iocbq *iocb, *next_iocb;
6535         int i;
6536         unsigned long flags = 0;
6537         uint16_t prev_pring_flag;
6538
6539         lpfc_cleanup_discovery_resources(vport);
6540
6541         spin_lock_irqsave(&phba->hbalock, flags);
6542         for (i = 0; i < psli->num_rings; i++) {
6543                 pring = &psli->ring[i];
6544                 prev_pring_flag = pring->flag;
6545                 /* Only slow rings */
6546                 if (pring->ringno == LPFC_ELS_RING) {
6547                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
6548                         /* Set the lpfc data pending flag */
6549                         set_bit(LPFC_DATA_READY, &phba->data_flags);
6550                 }
6551                 /*
6552                  * Error everything on the txq since these iocbs have not been
6553                  * given to the FW yet.
6554                  */
6555                 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
6556                         if (iocb->vport != vport)
6557                                 continue;
6558                         list_move_tail(&iocb->list, &completions);
6559                         pring->txq_cnt--;
6560                 }
6561
6562                 /* Next issue ABTS for everything on the txcmplq */
6563                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
6564                                                                         list) {
6565                         if (iocb->vport != vport)
6566                                 continue;
6567                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
6568                 }
6569
6570                 pring->flag = prev_pring_flag;
6571         }
6572
6573         spin_unlock_irqrestore(&phba->hbalock, flags);
6574
6575         /* Cancel all the IOCBs from the completions list */
6576         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6577                               IOERR_SLI_DOWN);
6578         return 1;
6579 }
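
/*
 * Illustrative caller (sketch): the vport delete path runs this teardown
 * before freeing the virtual port's data structures, e.g.:
 *
 *	lpfc_sli_host_down(vport);
 */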
6580
6581 /**
6582  * lpfc_sli_hba_down - Resource cleanup function for the HBA
6583  * @phba: Pointer to HBA context object.
6584  *
6585  * This function cleans up all iocbs, buffers, and mailbox commands
6586  * while shutting down the HBA. This function is called with no
6587  * lock held and always returns 1.
6588  * This function does the following to cleanup driver resources:
6589  * - Free discovery resources for each virtual port
6590  * - Cleanup any pending fabric iocbs
6591  * - Iterate through the iocb txq and free each entry
6592  *   in the list.
6593  * - Free up any buffer posted to the HBA
6594  * - Free mailbox commands in the mailbox queue.
6595  **/
6596 int
6597 lpfc_sli_hba_down(struct lpfc_hba *phba)
6598 {
6599         LIST_HEAD(completions);
6600         struct lpfc_sli *psli = &phba->sli;
6601         struct lpfc_sli_ring *pring;
6602         struct lpfc_dmabuf *buf_ptr;
6603         unsigned long flags = 0;
6604         int i;
6605
6606         /* Shutdown the mailbox command sub-system */
6607         lpfc_sli_mbox_sys_shutdown(phba);
6608
6609         lpfc_hba_down_prep(phba);
6610
6611         lpfc_fabric_abort_hba(phba);
6612
6613         spin_lock_irqsave(&phba->hbalock, flags);
6614         for (i = 0; i < psli->num_rings; i++) {
6615                 pring = &psli->ring[i];
6616                 /* Only slow rings */
6617                 if (pring->ringno == LPFC_ELS_RING) {
6618                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
6619                         /* Set the lpfc data pending flag */
6620                         set_bit(LPFC_DATA_READY, &phba->data_flags);
6621                 }
6622
6623                 /*
6624                  * Error everything on the txq since these iocbs have not been
6625                  * given to the FW yet.
6626                  */
6627                 list_splice_init(&pring->txq, &completions);
6628                 pring->txq_cnt = 0;
6629
6630         }
6631         spin_unlock_irqrestore(&phba->hbalock, flags);
6632
6633         /* Cancel all the IOCBs from the completions list */
6634         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6635                               IOERR_SLI_DOWN);
6636
6637         spin_lock_irqsave(&phba->hbalock, flags);
6638         list_splice_init(&phba->elsbuf, &completions);
6639         phba->elsbuf_cnt = 0;
6640         phba->elsbuf_prev_cnt = 0;
6641         spin_unlock_irqrestore(&phba->hbalock, flags);
6642
6643         while (!list_empty(&completions)) {
6644                 list_remove_head(&completions, buf_ptr,
6645                         struct lpfc_dmabuf, list);
6646                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
6647                 kfree(buf_ptr);
6648         }
6649
6650         /* Return any active mbox cmds */
6651         del_timer_sync(&psli->mbox_tmo);
6652
6653         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
6654         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6655         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
6656
6657         return 1;
6658 }
6659
6660 /**
6661  * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
6662  * @phba: Pointer to HBA context object.
6663  *
6664  * This function cleans up all queues, iocb, buffers, mailbox commands while
6665  * shutting down the SLI4 HBA FCoE function. This function is called with no
6666  * lock held and always returns 1.
6667  *
6668  * This function does the following to cleanup driver FCoE function resources:
6669  * - Free discovery resources for each virtual port
6670  * - Cleanup any pending fabric iocbs
6671  * - Iterate through the iocb txq and free each entry in the list.
6672  * - Free up any buffer posted to the HBA.
6673  * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6674  * - Free mailbox commands in the mailbox queue.
6675  **/
6676 int
6677 lpfc_sli4_hba_down(struct lpfc_hba *phba)
6678 {
6679         /* Stop the SLI4 device port */
6680         lpfc_stop_port(phba);
6681
6682         /* Tear down the queues in the HBA */
6683         lpfc_sli4_queue_unset(phba);
6684
6685         /* unregister default FCFI from the HBA */
6686         lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
6687
6688         return 1;
6689 }
6690
6691 /**
6692  * lpfc_sli_pcimem_bcopy - SLI memory copy function
6693  * @srcp: Source memory pointer.
6694  * @destp: Destination memory pointer.
6695  * @cnt: Number of bytes to be copied (copied one 32-bit word at a time).
6696  *
6697  * This function is used for copying data between driver memory
6698  * and the SLI memory. This function also changes the endianness
6699  * of each word if native endianness is different from SLI
6700  * endianness. This function can be called with or without
6701  * a lock held.
6702  **/
6703 void
6704 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
6705 {
6706         uint32_t *src = srcp;
6707         uint32_t *dest = destp;
6708         uint32_t ldata;
6709         int i;
6710
6711         for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
6712                 ldata = *src;
6713                 ldata = le32_to_cpu(ldata);
6714                 *dest = ldata;
6715                 src++;
6716                 dest++;
6717         }
6718 }
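
/*
 * Illustrative use (sketch, with hypothetical locals): the response-ring
 * handler copies an IOCB out of host-shared memory with this routine,
 * swapping each 32-bit word to CPU order on big-endian hosts; note that
 * @cnt is a byte count:
 *
 *	lpfc_sli_pcimem_bcopy(entry, &rspiocbq->iocb,
 *			      phba->iocb_rsp_size);
 */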
6719
6720
6721 /**
6722  * lpfc_sli_bemem_bcopy - SLI memory copy function
6723  * @srcp: Source memory pointer.
6724  * @destp: Destination memory pointer.
6725  * @cnt: Number of bytes to be copied (copied one 32-bit word at a time).
6726  *
6727  * This function is used for copying data from a data structure
6728  * with big-endian representation to local (native) endianness.
6729  * This function can be called with or without a lock held.
6730  **/
6731 void
6732 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
6733 {
6734         uint32_t *src = srcp;
6735         uint32_t *dest = destp;
6736         uint32_t ldata;
6737         int i;
6738
6739         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
6740                 ldata = *src;
6741                 ldata = be32_to_cpu(ldata);
6742                 *dest = ldata;
6743                 src++;
6744                 dest++;
6745         }
6746 }
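
/*
 * The only difference from lpfc_sli_pcimem_bcopy() above is the swap
 * direction: be32_to_cpu() instead of le32_to_cpu(). A hypothetical
 * call copying @len bytes of a big-endian @src image into a
 * native-endian @dst would be:
 *
 *	lpfc_sli_bemem_bcopy(src, dst, len);
 */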
6747
6748 /**
6749  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
6750  * @phba: Pointer to HBA context object.
6751  * @pring: Pointer to driver SLI ring object.
6752  * @mp: Pointer to driver buffer object.
6753  *
6754  * This function is called with no lock held.
6755  * It always returns zero after adding the buffer to the postbufq
6756  * buffer list.
6757  **/
6758 int
6759 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6760                          struct lpfc_dmabuf *mp)
6761 {
6762         /* Stick struct lpfc_dmabuf at end of postbufq so the driver
6763          * can look it up later. */
6764         spin_lock_irq(&phba->hbalock);
6765         list_add_tail(&mp->list, &pring->postbufq);
6766         pring->postbufq_cnt++;
6767         spin_unlock_irq(&phba->hbalock);
6768         return 0;
6769 }
6770
6771 /**
6772  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
6773  * @phba: Pointer to HBA context object.
6774  *
6775  * When HBQ is enabled, buffers are searched based on tags. This function
6776  * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
6777  * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
6778  * does not conflict with tags of buffer posted for unsolicited events.
6779  * The function returns the allocated tag. The function is called with
6780  * no locks held.
6781  **/
6782 uint32_t
6783 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
6784 {
6785         spin_lock_irq(&phba->hbalock);
6786         phba->buffer_tag_count++;
6787         /*
6788          * Always set the QUE_BUFTAG_BIT to distinguish this tag
6789          * from the tags assigned by the HBQ.
6790          */
6791         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
6792         spin_unlock_irq(&phba->hbalock);
6793         return phba->buffer_tag_count;
6794 }
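
/*
 * Illustrative pairing (sketch, hypothetical buffer 'mp'): a caller tags
 * a buffer before building the CMD_QUE_XRI64_CX iocb for it, then parks
 * it on the postbufq so the completion path can find it again:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 */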
6795
6796 /**
6797  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
6798  * @phba: Pointer to HBA context object.
6799  * @pring: Pointer to driver SLI ring object.
6800  * @tag: Buffer tag.
6801  *
6802  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
6803  * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
6804  * iocb is posted to the response ring with the tag of the buffer.
6805  * This function searches the pring->postbufq list using the tag
6806  * to find buffer associated with CMD_IOCB_RET_XRI64_CX
6807  * iocb. If the buffer is found then lpfc_dmabuf object of the
6808  * buffer is returned to the caller else NULL is returned.
6809  * This function is called with no lock held.
6810  **/
6811 struct lpfc_dmabuf *
6812 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6813                         uint32_t tag)
6814 {
6815         struct lpfc_dmabuf *mp, *next_mp;
6816         struct list_head *slp = &pring->postbufq;
6817
6818         /* Search postbufq, from the beginning, looking for a match on tag */
6819         spin_lock_irq(&phba->hbalock);
6820         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
6821                 if (mp->buffer_tag == tag) {
6822                         list_del_init(&mp->list);
6823                         pring->postbufq_cnt--;
6824                         spin_unlock_irq(&phba->hbalock);
6825                         return mp;
6826                 }
6827         }
6828
6829         spin_unlock_irq(&phba->hbalock);
6830         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6831                         "0402 Cannot find virtual addr for buffer tag on "
6832                         "ring %d Data x%lx x%p x%p x%x\n",
6833                         pring->ringno, (unsigned long) tag,
6834                         slp->next, slp->prev, pring->postbufq_cnt);
6835
6836         return NULL;
6837 }
6838
6839 /**
6840  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
6841  * @phba: Pointer to HBA context object.
6842  * @pring: Pointer to driver SLI ring object.
6843  * @phys: DMA address of the buffer.
6844  *
6845  * This function searches the buffer list using the dma_address
6846  * of unsolicited event to find the driver's lpfc_dmabuf object
6847  * corresponding to the dma_address. The function returns the
6848  * lpfc_dmabuf object if a buffer is found else it returns NULL.
6849  * This function is called by the ct and els unsolicited event
6850  * handlers to get the buffer associated with the unsolicited
6851  * event.
6852  *
6853  * This function is called with no lock held.
6854  **/
6855 struct lpfc_dmabuf *
6856 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6857                          dma_addr_t phys)
6858 {
6859         struct lpfc_dmabuf *mp, *next_mp;
6860         struct list_head *slp = &pring->postbufq;
6861
6862         /* Search postbufq, from the beginning, looking for a match on phys */
6863         spin_lock_irq(&phba->hbalock);
6864         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
6865                 if (mp->phys == phys) {
6866                         list_del_init(&mp->list);
6867                         pring->postbufq_cnt--;
6868                         spin_unlock_irq(&phba->hbalock);
6869                         return mp;
6870                 }
6871         }
6872
6873         spin_unlock_irq(&phba->hbalock);
6874         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6875                         "0410 Cannot find virtual addr for mapped buf on "
6876                         "ring %d Data x%llx x%p x%p x%x\n",
6877                         pring->ringno, (unsigned long long)phys,
6878                         slp->next, slp->prev, pring->postbufq_cnt);
6879         return NULL;
6880 }
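
/*
 * Illustrative lookup (sketch, hypothetical locals): an unsolicited
 * CT/ELS handler rebuilds the DMA address from the iocb's 64-bit BDE
 * and recovers the posted buffer:
 *
 *	dma_addr_t paddr = getPaddr(icmd->un.cont64[0].addrHigh,
 *				    icmd->un.cont64[0].addrLow);
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, paddr);
 */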
6881
6882 /**
6883  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
6884  * @phba: Pointer to HBA context object.
6885  * @cmdiocb: Pointer to driver command iocb object.
6886  * @rspiocb: Pointer to driver response iocb object.
6887  *
6888  * This function is the completion handler for the abort iocbs for
6889  * ELS commands. This function is called from the ELS ring event
6890  * handler with no lock held. This function frees memory resources
6891  * associated with the abort iocb.
6892  **/
6893 static void
6894 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6895                         struct lpfc_iocbq *rspiocb)
6896 {
6897         IOCB_t *irsp = &rspiocb->iocb;
6898         uint16_t abort_iotag, abort_context;
6899         struct lpfc_iocbq *abort_iocb;
6900         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6901
6902         abort_iocb = NULL;
6903
6904         if (irsp->ulpStatus) {
6905                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
6906                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
6907
6908                 spin_lock_irq(&phba->hbalock);
6909                 if (phba->sli_rev < LPFC_SLI_REV4) {
6910                         if (abort_iotag != 0 &&
6911                                 abort_iotag <= phba->sli.last_iotag)
6912                                 abort_iocb =
6913                                         phba->sli.iocbq_lookup[abort_iotag];
6914                 } else
6915                         /* For sli4 the abort_tag is the XRI,
6916                          * so the abort routine puts the iotag  of the iocb
6917                          * being aborted in the context field of the abort
6918                          * IOCB.
6919                          */
6920                         abort_iocb = phba->sli.iocbq_lookup[abort_context];
6921
6922                 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
6923                                 "0327 Cannot abort els iocb %p "
6924                                 "with tag %x context %x, abort status %x, "
6925                                 "abort code %x\n",
6926                                 abort_iocb, abort_iotag, abort_context,
6927                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
6928
6929                 /*
6930                  *  If the iocb is not found in the firmware queue, the iocb
6931                  *  might have completed already. Do not free it again.
6932                  */
6933                 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
6934                         if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
6935                                 spin_unlock_irq(&phba->hbalock);
6936                                 lpfc_sli_release_iocbq(phba, cmdiocb);
6937                                 return;
6938                         }
6939                         /* For SLI4 the ulpContext field for abort IOCB
6940                          * holds the iotag of the IOCB being aborted so
6941                          * the local abort_context needs to be reset to
6942                          * match the aborted IOCBs ulpContext.
6943                          */
6944                         if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
6945                                 abort_context = abort_iocb->iocb.ulpContext;
6946                 }
6947                 /*
6948                  * make sure we have the right iocbq before taking it
6949                  * off the txcmplq and try to call completion routine.
6950                  */
6951                 if (!abort_iocb ||
6952                     abort_iocb->iocb.ulpContext != abort_context ||
6953                     (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
6954                         spin_unlock_irq(&phba->hbalock);
6955                 else {
6956                         list_del_init(&abort_iocb->list);
6957                         pring->txcmplq_cnt--;
6958                         spin_unlock_irq(&phba->hbalock);
6959
6960                         /* Firmware could still be in progress of DMAing
6961                          * payload, so don't free data buffer till after
6962                          * a hbeat.
6963                          */
6964                         abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
6965
6966                         abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
6967                         abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
6968                         abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
6969                         (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
6970                 }
6971         }
6972
6973         lpfc_sli_release_iocbq(phba, cmdiocb);
6974         return;
6975 }
6976
6977 /**
6978  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
6979  * @phba: Pointer to HBA context object.
6980  * @cmdiocb: Pointer to driver command iocb object.
6981  * @rspiocb: Pointer to driver response iocb object.
6982  *
6983  * The function is called from SLI ring event handler with no
6984  * lock held. This function is the completion handler for ELS commands
6985  * which are aborted. The function frees memory resources used for
6986  * the aborted ELS commands.
6987  **/
6988 static void
6989 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6990                      struct lpfc_iocbq *rspiocb)
6991 {
6992         IOCB_t *irsp = &rspiocb->iocb;
6993
6994         /* ELS cmd tag <ulpIoTag> completes */
6995         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
6996                         "0139 Ignoring ELS cmd tag x%x completion Data: "
6997                         "x%x x%x x%x\n",
6998                         irsp->ulpIoTag, irsp->ulpStatus,
6999                         irsp->un.ulpWord[4], irsp->ulpTimeout);
7000         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
7001                 lpfc_ct_free_iocb(phba, cmdiocb);
7002         else
7003                 lpfc_els_free_iocb(phba, cmdiocb);
7004         return;
7005 }
7006
7007 /**
7008  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
7009  * @phba: Pointer to HBA context object.
7010  * @pring: Pointer to driver SLI ring object.
7011  * @cmdiocb: Pointer to driver command iocb object.
7012  *
7013  * This function issues an abort iocb for the provided command
7014  * iocb. This function is called with hbalock held.
7015  * The function returns 0 when it fails due to memory allocation
7016  * failure or when the command iocb is an abort request.
7017  **/
7018 int
7019 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7020                            struct lpfc_iocbq *cmdiocb)
7021 {
7022         struct lpfc_vport *vport = cmdiocb->vport;
7023         struct lpfc_iocbq *abtsiocbp;
7024         IOCB_t *icmd = NULL;
7025         IOCB_t *iabt = NULL;
7026         int retval = IOCB_ERROR;
7027
7028         /*
7029          * There are certain command types we don't want to abort.  And we
7030          * don't want to abort commands that are already in the process of
7031          * being aborted.
7032          */
7033         icmd = &cmdiocb->iocb;
7034         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
7035             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
7036             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
7037                 return 0;
7038
7039         /* If we're unloading, don't abort iocb on the ELS ring, but change the
7040          * callback so that nothing happens when it finishes.
7041          */
7042         if ((vport->load_flag & FC_UNLOADING) &&
7043             (pring->ringno == LPFC_ELS_RING)) {
7044                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
7045                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
7046                 else
7047                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
7048                 goto abort_iotag_exit;
7049         }
7050
7051         /* issue ABTS for this IOCB based on iotag */
7052         abtsiocbp = __lpfc_sli_get_iocbq(phba);
7053         if (abtsiocbp == NULL)
7054                 return 0;
7055
7056         /* Setting LPFC_DRIVER_ABORTED signals the completion path to set
7057          * the correct status before calling the completion handler.
7058          */
7059         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
7060
7061         iabt = &abtsiocbp->iocb;
7062         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
7063         iabt->un.acxri.abortContextTag = icmd->ulpContext;
7064         if (phba->sli_rev == LPFC_SLI_REV4) {
7065                 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
7066                 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
7067         }
7068         else
7069                 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
7070         iabt->ulpLe = 1;
7071         iabt->ulpClass = icmd->ulpClass;
7072
7073         if (phba->link_state >= LPFC_LINK_UP)
7074                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
7075         else
7076                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
7077
7078         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
7079
7080         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
7081                          "0339 Abort xri x%x, original iotag x%x, "
7082                          "abort cmd iotag x%x\n",
7083                          iabt->un.acxri.abortContextTag,
7084                          iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
7085         retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
7086
7087         if (retval)
7088                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
7089 abort_iotag_exit:
7090         /*
7091          * Caller to this routine should check for IOCB_ERROR
7092          * and handle it properly.  This routine no longer removes the
7093          * iocb from the txcmplq or calls the completion handler on IOCB_ERROR.
7094          */
7095         return retval;
7096 }
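
/*
 * Illustrative caller (sketch): because this routine expects the hbalock
 * to be held, a typical invocation is:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irq(&phba->hbalock);
 */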
7097
7098 /**
7099  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
7100  * @iocbq: Pointer to driver iocb object.
7101  * @vport: Pointer to driver virtual port object.
7102  * @tgt_id: SCSI ID of the target.
7103  * @lun_id: LUN ID of the scsi device.
7104  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
7105  *
7106  * This function acts as an iocb filter for functions which abort or count
7107  * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
7108  * 0 if the filtering criteria are met for the given iocb and will return
7109  * 1 if the filtering criteria are not met.
7110  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
7111  * given iocb is for the SCSI device specified by vport, tgt_id and
7112  * lun_id parameter.
7113  * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
7114  * given iocb is for the SCSI target specified by vport and tgt_id
7115  * parameters.
7116  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
7117  * given iocb is for the SCSI host associated with the given vport.
7118  * This function is called with no locks held.
7119  **/
7120 static int
7121 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
7122                            uint16_t tgt_id, uint64_t lun_id,
7123                            lpfc_ctx_cmd ctx_cmd)
7124 {
7125         struct lpfc_scsi_buf *lpfc_cmd;
7126         int rc = 1;
7127
7128         if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
7129                 return rc;
7130
7131         if (iocbq->vport != vport)
7132                 return rc;
7133
7134         lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
7135
7136         if (lpfc_cmd->pCmd == NULL)
7137                 return rc;
7138
7139         switch (ctx_cmd) {
7140         case LPFC_CTX_LUN:
7141                 if ((lpfc_cmd->rdata->pnode) &&
7142                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
7143                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
7144                         rc = 0;
7145                 break;
7146         case LPFC_CTX_TGT:
7147                 if ((lpfc_cmd->rdata->pnode) &&
7148                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
7149                         rc = 0;
7150                 break;
7151         case LPFC_CTX_HOST:
7152                 rc = 0;
7153                 break;
7154         default:
7155                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
7156                         __func__, ctx_cmd);
7157                 break;
7158         }
7159
7160         return rc;
7161 }
7162
7163 /**
7164  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
7165  * @vport: Pointer to virtual port.
7166  * @tgt_id: SCSI ID of the target.
7167  * @lun_id: LUN ID of the scsi device.
7168  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
7169  *
7170  * This function returns number of FCP commands pending for the vport.
7171  * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
7172  * commands pending on the vport associated with SCSI device specified
7173  * by tgt_id and lun_id parameters.
7174  * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
7175  * commands pending on the vport associated with SCSI target specified
7176  * by tgt_id parameter.
7177  * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
7178  * commands pending on the vport.
7179  * This function returns the number of iocbs which satisfy the filter.
7180  * This function is called without any lock held.
7181  **/
7182 int
7183 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
7184                   lpfc_ctx_cmd ctx_cmd)
7185 {
7186         struct lpfc_hba *phba = vport->phba;
7187         struct lpfc_iocbq *iocbq;
7188         int sum, i;
7189
7190         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
7191                 iocbq = phba->sli.iocbq_lookup[i];
7192
7193                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
7194                                                ctx_cmd) == 0)
7195                         sum++;
7196         }
7197
7198         return sum;
7199 }
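
/*
 * Illustrative use (sketch): a reset handler might poll until the
 * outstanding commands for a LUN drain, e.g.:
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN))
 *		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 */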
7200
7201 /**
7202  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
7203  * @phba: Pointer to HBA context object
7204  * @cmdiocb: Pointer to command iocb object.
7205  * @rspiocb: Pointer to response iocb object.
7206  *
7207  * This function is called when an aborted FCP iocb completes. This
7208  * function is called by the ring event handler with no lock held.
7209  * This function frees the iocb.
7210  **/
7211 void
7212 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7213                         struct lpfc_iocbq *rspiocb)
7214 {
7215         lpfc_sli_release_iocbq(phba, cmdiocb);
7216         return;
7217 }
7218
7219 /**
7220  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
7221  * @vport: Pointer to virtual port.
7222  * @pring: Pointer to driver SLI ring object.
7223  * @tgt_id: SCSI ID of the target.
7224  * @lun_id: LUN ID of the scsi device.
7225  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
7226  *
7227  * This function sends an abort command for every SCSI command
7228  * associated with the given virtual port pending on the ring
7229  * filtered by lpfc_sli_validate_fcp_iocb function.
7230  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
7231  * FCP iocbs associated with lun specified by tgt_id and lun_id
7232  * parameters
7233  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
7234  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
7235  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
7236  * FCP iocbs associated with virtual port.
7237  * This function returns number of iocbs it failed to abort.
7238  * This function is called with no locks held.
7239  **/
7240 int
7241 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
7242                     uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
7243 {
7244         struct lpfc_hba *phba = vport->phba;
7245         struct lpfc_iocbq *iocbq;
7246         struct lpfc_iocbq *abtsiocb;
7247         IOCB_t *cmd = NULL;
7248         int errcnt = 0, ret_val = 0;
7249         int i;
7250
7251         for (i = 1; i <= phba->sli.last_iotag; i++) {
7252                 iocbq = phba->sli.iocbq_lookup[i];
7253
7254                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
7255                                                abort_cmd) != 0)
7256                         continue;
7257
7258                 /* issue ABTS for this IOCB based on iotag */
7259                 abtsiocb = lpfc_sli_get_iocbq(phba);
7260                 if (abtsiocb == NULL) {
7261                         errcnt++;
7262                         continue;
7263                 }
7264
7265                 cmd = &iocbq->iocb;
7266                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
7267                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
7268                 if (phba->sli_rev == LPFC_SLI_REV4)
7269                         abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
7270                 else
7271                         abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
7272                 abtsiocb->iocb.ulpLe = 1;
7273                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
7274                 abtsiocb->vport = phba->pport;
7275
7276                 if (lpfc_is_link_up(phba))
7277                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
7278                 else
7279                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
7280
7281                 /* Setup callback routine and issue the command. */
7282                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
7283                 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
7284                                               abtsiocb, 0);
7285                 if (ret_val == IOCB_ERROR) {
7286                         lpfc_sli_release_iocbq(phba, abtsiocb);
7287                         errcnt++;
7288                         continue;
7289                 }
7290         }
7291
7292         return errcnt;
7293 }
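
/*
 * Illustrative use (sketch): a SCSI target reset handler might abort all
 * FCP iocbs still queued for one target on the FCP ring:
 *
 *	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
 *			    tgt_id, 0, LPFC_CTX_TGT);
 */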
7294
7295 /**
7296  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
7297  * @phba: Pointer to HBA context object.
7298  * @cmdiocbq: Pointer to command iocb.
7299  * @rspiocbq: Pointer to response iocb.
7300  *
7301  * This function is the completion handler for iocbs issued using
7302  * lpfc_sli_issue_iocb_wait function. This function is called by the
7303  * ring event handler function without any lock held. This function
7304  * can be called from both worker thread context and interrupt
7305  * context. This function also can be called from other thread which
7306  * cleans up the SLI layer objects.
7307  * This function copies the contents of the response iocb to the
7308  * response iocb memory object provided by the caller of
7309  * lpfc_sli_issue_iocb_wait and then wakes up the thread that
7310  * sleeps waiting for the iocb completion.
7311  **/
7312 static void
7313 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7314                         struct lpfc_iocbq *cmdiocbq,
7315                         struct lpfc_iocbq *rspiocbq)
7316 {
7317         wait_queue_head_t *pdone_q;
7318         unsigned long iflags;
7319
7320         spin_lock_irqsave(&phba->hbalock, iflags);
7321         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
7322         if (cmdiocbq->context2 && rspiocbq)
7323                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
7324                        &rspiocbq->iocb, sizeof(IOCB_t));
7325
7326         pdone_q = cmdiocbq->context_un.wait_queue;
7327         if (pdone_q)
7328                 wake_up(pdone_q);
7329         spin_unlock_irqrestore(&phba->hbalock, iflags);
7330         return;
7331 }
7332
7333 /**
7334  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
7335  * @phba: Pointer to HBA context object.
7336  * @piocbq: Pointer to command iocb.
7337  * @flag: Flag to test.
7338  *
7339  * This routine grabs the hbalock and then tests the iocb_flag to
7340  * see if the passed in flag is set.
7341  * Returns:
7342  * 1 if flag is set.
7343  * 0 if flag is not set.
7344  **/
7345 static int
7346 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
7347                  struct lpfc_iocbq *piocbq, uint32_t flag)
7348 {
7349         unsigned long iflags;
7350         int ret;
7351
7352         spin_lock_irqsave(&phba->hbalock, iflags);
7353         ret = piocbq->iocb_flag & flag;
7354         spin_unlock_irqrestore(&phba->hbalock, iflags);
7355         return ret;
7356
7357 }
7358
7359 /**
7360  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
7361  * @phba: Pointer to HBA context object.
7362  * @ring_number: SLI ring number on which to issue the iocb.
7363  * @piocb: Pointer to command iocb.
7364  * @prspiocbq: Pointer to response iocb.
7365  * @timeout: Timeout in number of seconds.
7366  *
7367  * This function issues the iocb to firmware and waits for the
7368  * iocb to complete. If the iocb command is not
7369  * completed within timeout seconds, it returns IOCB_TIMEDOUT.
7370  * Caller should not free the iocb resources if this function
7371  * returns IOCB_TIMEDOUT.
7372  * The function waits for the iocb completion using a
7373  * non-interruptible wait.
7374  * This function will sleep while waiting for iocb completion.
7375  * So, this function should not be called from any context which
7376  * does not allow sleeping. For the same reason, this function
7377  * cannot be called with interrupts disabled.
7378  * This function assumes that the iocb completions occur while
7379  * this function sleeps. So, this function cannot be called from
7380  * the thread which processes iocb completion for this ring.
7381  * This function clears the iocb_flag of the iocb object before
7382  * issuing the iocb and the iocb completion handler sets this
7383  * flag and wakes this thread when the iocb completes.
7384  * The contents of the response iocb will be copied to prspiocbq
7385  * by the completion handler when the command completes.
7386  * This function returns IOCB_SUCCESS when success.
7387  * This function is called with no lock held.
7388  **/
7389 int
7390 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7391                          uint32_t ring_number,
7392                          struct lpfc_iocbq *piocb,
7393                          struct lpfc_iocbq *prspiocbq,
7394                          uint32_t timeout)
7395 {
7396         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
7397         long timeleft, timeout_req = 0;
7398         int retval = IOCB_SUCCESS;
7399         uint32_t creg_val;
7400
7401         /*
7402          * If the caller has provided a response iocbq buffer, then context2
7403          * must be NULL; otherwise it is an error.
7404          */
7405         if (prspiocbq) {
7406                 if (piocb->context2)
7407                         return IOCB_ERROR;
7408                 piocb->context2 = prspiocbq;
7409         }
7410
7411         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
7412         piocb->context_un.wait_queue = &done_q;
7413         piocb->iocb_flag &= ~LPFC_IO_WAKE;
7414
7415         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7416                 creg_val = readl(phba->HCregaddr);
7417                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
7418                 writel(creg_val, phba->HCregaddr);
7419                 readl(phba->HCregaddr); /* flush */
7420         }
7421
7422         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
7423         if (retval == IOCB_SUCCESS) {
7424                 timeout_req = timeout * HZ;
7425                 timeleft = wait_event_timeout(done_q,
7426                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
7427                                 timeout_req);
7428
7429                 if (piocb->iocb_flag & LPFC_IO_WAKE) {
7430                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7431                                         "0331 IOCB wake signaled\n");
7432                 } else if (timeleft == 0) {
7433                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7434                                         "0338 IOCB wait timeout error - no "
7435                                         "wake response Data x%x\n", timeout);
7436                         retval = IOCB_TIMEDOUT;
7437                 } else {
7438                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7439                                         "0330 IOCB wake NOT set, "
7440                                         "Data x%x x%lx\n",
7441                                         timeout, (timeleft / HZ));
7442                         retval = IOCB_TIMEDOUT;
7443                 }
7444         } else {
7445                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7446                                 "0332 IOCB wait issue failed, Data x%x\n",
7447                                 retval);
7448                 retval = IOCB_ERROR;
7449         }
7450
7451         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7452                 creg_val = readl(phba->HCregaddr);
7453                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
7454                 writel(creg_val, phba->HCregaddr);
7455                 readl(phba->HCregaddr); /* flush */
7456         }
7457
7458         if (prspiocbq)
7459                 piocb->context2 = NULL;
7460
7461         piocb->context_un.wait_queue = NULL;
7462         piocb->iocb_cmpl = NULL;
7463         return retval;
7464 }
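
/*
 * Illustrative use (sketch, hypothetical iocbq pointers): issue an
 * ELS-ring iocb and wait up to 30 seconds for its response to be copied
 * into 'rspiocbq':
 *
 *	retval = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *					  rspiocbq, 30);
 *	if (retval == IOCB_SUCCESS)
 *		status = rspiocbq->iocb.ulpStatus;
 */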
7465
7466 /**
7467  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
7468  * @phba: Pointer to HBA context object.
7469  * @pmboxq: Pointer to driver mailbox object.
7470  * @timeout: Timeout in number of seconds.
7471  *
7472  * This function issues the mailbox to firmware and waits for the
7473  * mailbox command to complete. If the mailbox command is not
7474  * completed within timeout seconds, it returns MBX_TIMEOUT.
7475  * The function waits for the mailbox completion using an
7476  * interruptible wait. If the thread is woken up due to a
7477  * signal, MBX_TIMEOUT error is returned to the caller. Caller
7478  * should not free the mailbox resources if this function returns
7479  * MBX_TIMEOUT.
7480  * This function will sleep while waiting for mailbox completion.
7481  * So, this function should not be called from any context which
7482  * does not allow sleeping. Due to the same reason, this function
7483  * cannot be called with interrupt disabled.
7484  * This function assumes that the mailbox completion occurs while
7485  * this function sleeps. So, this function cannot be called from
7486  * the worker thread which processes mailbox completion.
7487  * This function is called in the context of HBA management
7488  * applications.
7489  * This function returns MBX_SUCCESS when successful.
7490  * This function is called with no lock held.
7491  **/
7492 int
7493 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
7494                          uint32_t timeout)
7495 {
7496         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
7497         int retval;
7498         unsigned long flag;
7499
7500         /* The caller must leave context1 empty. */
7501         if (pmboxq->context1)
7502                 return MBX_NOT_FINISHED;
7503
7504         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
7505         /* setup wake call as IOCB callback */
7506         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
7507         /* setup context field to pass wait_queue pointer to wake function  */
7508         pmboxq->context1 = &done_q;
7509
7510         /* now issue the command */
7511         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
7512
7513         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
7514                 wait_event_interruptible_timeout(done_q,
7515                                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
7516                                 timeout * HZ);
7517
7518                 spin_lock_irqsave(&phba->hbalock, flag);
7519                 pmboxq->context1 = NULL;
7520                 /*
7521                  * if LPFC_MBX_WAKE flag is set the mailbox is completed
7522                  * else do not free the resources.
7523                  */
7524                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
7525                         retval = MBX_SUCCESS;
7526                 else {
7527                         retval = MBX_TIMEOUT;
7528                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7529                 }
7530                 spin_unlock_irqrestore(&phba->hbalock, flag);
7531         }
7532
7533         return retval;
7534 }
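
/*
 * Illustrative use (sketch): issue a mailbox synchronously with the
 * standard per-command timeout, freeing it only when it actually
 * completed:
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq,
 *			lpfc_mbox_tmo_val(phba, pmboxq->u.mb.mbxCommand));
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 */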
7535
7536 /**
7537  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
7538  * @phba: Pointer to HBA context.
7539  *
7540  * This function is called to shutdown the driver's mailbox sub-system.
7541  * It first marks the mailbox sub-system as blocked to prevent further
7542  * asynchronous mailbox commands from being issued off the pending mailbox
7543  * command queue. If the mailbox command sub-system shutdown is due to
7544  * HBA error conditions such as EEH or ERATT, this routine shall invoke
7545  * the mailbox sub-system flush routine to forcefully bring down the
7546  * mailbox sub-system. Otherwise, if it is due to a normal condition (such
7547  * as with offline or HBA function reset), this routine will wait for the
7548  * outstanding mailbox command to complete before invoking the mailbox
7549  * sub-system flush routine to gracefully bring down mailbox sub-system.
7550  **/
7551 void
7552 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
7553 {
7554         struct lpfc_sli *psli = &phba->sli;
7555         uint8_t actcmd = MBX_HEARTBEAT;
7556         unsigned long timeout;
7557
7558         spin_lock_irq(&phba->hbalock);
7559         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7560         spin_unlock_irq(&phba->hbalock);
7561
7562         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7563                 spin_lock_irq(&phba->hbalock);
7564                 if (phba->sli.mbox_active)
7565                         actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
7566                 spin_unlock_irq(&phba->hbalock);
7567                 /* Determine how long we might wait for the active mailbox
7568                  * command to be gracefully completed by firmware.
7569                  */
7570                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
7571                                            1000) + jiffies;
7572                 while (phba->sli.mbox_active) {
7573                         /* Check active mailbox complete status every 2ms */
7574                         msleep(2);
7575                         if (time_after(jiffies, timeout))
7576                                 /* Timeout: let the mailbox flush routine
7577                                  * forcefully release the active mailbox command.
7578                                  */
7579                                 break;
7580                 }
7581         }
7582         lpfc_sli_mbox_sys_flush(phba);
7583 }
7584
7585 /**
7586  * lpfc_sli_eratt_read - read sli-3 error attention events
7587  * @phba: Pointer to HBA context.
7588  *
7589  * This function is called to read the SLI3 device error attention registers
7590  * for possible error attention events. The caller must hold the hbalock
7591  * with spin_lock_irq().
7592  *
7593  * This function returns 1 when there is Error Attention in the Host Attention
7594  * Register and returns 0 otherwise.
7595  **/
7596 static int
7597 lpfc_sli_eratt_read(struct lpfc_hba *phba)
7598 {
7599         uint32_t ha_copy;
7600
7601         /* Read chip Host Attention (HA) register */
7602         ha_copy = readl(phba->HAregaddr);
7603         if (ha_copy & HA_ERATT) {
7604                 /* Read host status register to retrieve error event */
7605                 lpfc_sli_read_hs(phba);
7606
7607                 /* Check if a deferred error condition is active */
7608                 if ((HS_FFER1 & phba->work_hs) &&
7609                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7610                      HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7611                         phba->hba_flag |= DEFER_ERATT;
7612                         /* Clear all interrupt enable conditions */
7613                         writel(0, phba->HCregaddr);
7614                         readl(phba->HCregaddr);
7615                 }
7616
7617                 /* Set the driver HA work bitmap */
7618                 phba->work_ha |= HA_ERATT;
7619                 /* Indicate polling handles this ERATT */
7620                 phba->hba_flag |= HBA_ERATT_HANDLED;
7621                 return 1;
7622         }
7623         return 0;
7624 }
7625
7626 /**
7627  * lpfc_sli4_eratt_read - read sli-4 error attention events
7628  * @phba: Pointer to HBA context.
7629  *
7630  * This function is called to read the SLI4 device error attention registers
7631  * for possible error attention events. The caller must hold the hbalock
7632  * with spin_lock_irq().
7633  *
7634  * This function returns 1 when there is Error Attention in the Host Attention
7635  * Register and returns 0 otherwise.
7636  **/
7637 static int
7638 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7639 {
7640         uint32_t uerr_sta_hi, uerr_sta_lo;
7641         uint32_t onlnreg0, onlnreg1;
7642
7643         /* For now, use the SLI4 device internal unrecoverable error
7644          * registers for error attention. This can be changed later.
7645          */
7646         onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
7647         onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
7648         if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
7649                 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7650                 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7651                 if (uerr_sta_lo || uerr_sta_hi) {
7652                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7653                                         "1423 HBA Unrecoverable error: "
7654                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7655                                         "online0_reg=0x%x, online1_reg=0x%x\n",
7656                                         uerr_sta_lo, uerr_sta_hi,
7657                                         onlnreg0, onlnreg1);
7658                         phba->work_status[0] = uerr_sta_lo;
7659                         phba->work_status[1] = uerr_sta_hi;
7660                         /* Set the driver HA work bitmap */
7661                         phba->work_ha |= HA_ERATT;
7662                         /* Indicate polling handles this ERATT */
7663                         phba->hba_flag |= HBA_ERATT_HANDLED;
7664                         return 1;
7665                 }
7666         }
7667         return 0;
7668 }
7669
7670 /**
7671  * lpfc_sli_check_eratt - check error attention events
7672  * @phba: Pointer to HBA context.
7673  *
7674  * This function is called from timer soft interrupt context to check HBA's
7675  * error attention register bit for error attention events.
7676  *
7677  * This function returns 1 when there is Error Attention in the Host Attention
7678  * Register and returns 0 otherwise.
7679  **/
7680 int
7681 lpfc_sli_check_eratt(struct lpfc_hba *phba)
7682 {
7683         uint32_t ha_copy;
7684
7685         /* If somebody is waiting to handle an eratt, don't process it
7686          * here. The brdkill function will do this.
7687          */
7688         if (phba->link_flag & LS_IGNORE_ERATT)
7689                 return 0;
7690
7691         /* Check if interrupt handler handles this ERATT */
7692         spin_lock_irq(&phba->hbalock);
7693         if (phba->hba_flag & HBA_ERATT_HANDLED) {
7694                 /* Interrupt handler has handled ERATT */
7695                 spin_unlock_irq(&phba->hbalock);
7696                 return 0;
7697         }
7698
7699         /*
7700          * If there is deferred error attention, do not check for error
7701          * attention
7702          */
7703         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7704                 spin_unlock_irq(&phba->hbalock);
7705                 return 0;
7706         }
7707
7708         /* If PCI channel is offline, don't process it */
7709         if (unlikely(pci_channel_offline(phba->pcidev))) {
7710                 spin_unlock_irq(&phba->hbalock);
7711                 return 0;
7712         }
7713
7714         switch (phba->sli_rev) {
7715         case LPFC_SLI_REV2:
7716         case LPFC_SLI_REV3:
7717                 /* Read chip Host Attention (HA) register */
7718                 ha_copy = lpfc_sli_eratt_read(phba);
7719                 break;
7720         case LPFC_SLI_REV4:
7721                 /* Read device Unrecoverable Error (UERR) registers */
7722                 ha_copy = lpfc_sli4_eratt_read(phba);
7723                 break;
7724         default:
7725                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7726                                 "0299 Invalid SLI revision (%d)\n",
7727                                 phba->sli_rev);
7728                 ha_copy = 0;
7729                 break;
7730         }
7731         spin_unlock_irq(&phba->hbalock);
7732
7733         return ha_copy;
7734 }
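
/*
 * Usage sketch with a hypothetical caller: a timer-driven poller would call
 * lpfc_sli_check_eratt() and wake the worker thread only when an error
 * attention is actually pending:
 *
 *	static void example_eratt_poll(struct lpfc_hba *phba)
 *	{
 *		if (lpfc_sli_check_eratt(phba))
 *			lpfc_worker_wake_up(phba);
 *	}
 */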
7735
7736 /**
7737  * lpfc_intr_state_check - Check device state for interrupt handling
7738  * @phba: Pointer to HBA context.
7739  *
7740  * This inline routine checks whether a device or its PCI slot is in a state
7741  * in which the interrupt should be handled.
7742  *
7743  * This function returns 0 if the device or the PCI slot is in a state in
7744  * which the interrupt should be handled, otherwise -EIO.
7745  */
7746 static inline int
7747 lpfc_intr_state_check(struct lpfc_hba *phba)
7748 {
7749         /* If the pci channel is offline, ignore all the interrupts */
7750         if (unlikely(pci_channel_offline(phba->pcidev)))
7751                 return -EIO;
7752
7753         /* Update device level interrupt statistics */
7754         phba->sli.slistat.sli_intr++;
7755
7756         /* Ignore all interrupts during initialization. */
7757         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7758                 return -EIO;
7759
7760         return 0;
7761 }
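
/*
 * Usage sketch: each interrupt entry point below opens with this guard so
 * that interrupts arriving while the PCI channel is offline or the HBA is
 * still initializing are rejected up front:
 *
 *	if (lpfc_intr_state_check(phba))
 *		return IRQ_NONE;
 */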
7762
7763 /**
7764  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
7765  * @irq: Interrupt number.
7766  * @dev_id: The device context pointer.
7767  *
7768  * This function is directly called from the PCI layer as an interrupt
7769  * service routine when device with SLI-3 interface spec is enabled with
7770  * MSI-X multi-message interrupt mode and there are slow-path events in
7771  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
7772  * interrupt mode, this function is called as part of the device-level
7773  * interrupt handler. When the PCI slot is in error recovery or the HBA
7774  * is undergoing initialization, the interrupt handler will not process
7775  * the interrupt. The link attention and ELS ring attention events are
7776  * handled by the worker thread. The interrupt handler signals the worker
7777  * thread and returns for these events. This function is called without
7778  * any lock held. It gets the hbalock to access and update SLI data
7779  * structures.
7780  *
7781  * This function returns IRQ_HANDLED when interrupt is handled else it
7782  * returns IRQ_NONE.
7783  **/
7784 irqreturn_t
7785 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7786 {
7787         struct lpfc_hba  *phba;
7788         uint32_t ha_copy;
7789         uint32_t work_ha_copy;
7790         unsigned long status;
7791         unsigned long iflag;
7792         uint32_t control;
7793
7794         MAILBOX_t *mbox, *pmbox;
7795         struct lpfc_vport *vport;
7796         struct lpfc_nodelist *ndlp;
7797         struct lpfc_dmabuf *mp;
7798         LPFC_MBOXQ_t *pmb;
7799         int rc;
7800
7801         /*
7802          * Get the driver's phba structure from the dev_id and
7803          * assume the HBA is not interrupting.
7804          */
7805         phba = (struct lpfc_hba *)dev_id;
7806
7807         if (unlikely(!phba))
7808                 return IRQ_NONE;
7809
7810         /*
7811          * Stuff needs to be attended to when this function is invoked as an
7812          * individual interrupt handler in MSI-X multi-message interrupt mode
7813          */
7814         if (phba->intr_type == MSIX) {
7815                 /* Check device state for handling interrupt */
7816                 if (lpfc_intr_state_check(phba))
7817                         return IRQ_NONE;
7818                 /* Need to read HA REG for slow-path events */
7819                 spin_lock_irqsave(&phba->hbalock, iflag);
7820                 ha_copy = readl(phba->HAregaddr);
7821                 /* If somebody is waiting to handle an eratt don't process it
7822                  * here. The brdkill function will do this.
7823                  */
7824                 if (phba->link_flag & LS_IGNORE_ERATT)
7825                         ha_copy &= ~HA_ERATT;
7826                 /* Check the need for handling ERATT in interrupt handler */
7827                 if (ha_copy & HA_ERATT) {
7828                         if (phba->hba_flag & HBA_ERATT_HANDLED)
7829                                 /* ERATT polling has handled ERATT */
7830                                 ha_copy &= ~HA_ERATT;
7831                         else
7832                                 /* Indicate interrupt handler handles ERATT */
7833                                 phba->hba_flag |= HBA_ERATT_HANDLED;
7834                 }
7835
7836                 /*
7837                  * If there is deferred error attention, do not check for any
7838                  * interrupt.
7839                  */
7840                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7841                         spin_unlock_irqrestore(&phba->hbalock, iflag);
7842                         return IRQ_NONE;
7843                 }
7844
7845                 /* Clear only the attention sources related to the slow path */
7846                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
7847                         phba->HAregaddr);
7848                 readl(phba->HAregaddr); /* flush */
7849                 spin_unlock_irqrestore(&phba->hbalock, iflag);
7850         } else
7851                 ha_copy = phba->ha_copy;
7852
7853         work_ha_copy = ha_copy & phba->work_ha_mask;
7854
7855         if (work_ha_copy) {
7856                 if (work_ha_copy & HA_LATT) {
7857                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
7858                                 /*
7859                                  * Turn off Link Attention interrupts
7860                                  * until CLEAR_LA done
7861                                  */
7862                                 spin_lock_irqsave(&phba->hbalock, iflag);
7863                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
7864                                 control = readl(phba->HCregaddr);
7865                                 control &= ~HC_LAINT_ENA;
7866                                 writel(control, phba->HCregaddr);
7867                                 readl(phba->HCregaddr); /* flush */
7868                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
7869                         }
7870                         else
7871                                 work_ha_copy &= ~HA_LATT;
7872                 }
7873
7874                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
7875                         /*
7876                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
7877                          * the only slow ring.
7878                          */
7879                         status = (work_ha_copy &
7880                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
7881                         status >>= (4*LPFC_ELS_RING);
7882                         if (status & HA_RXMASK) {
7883                                 spin_lock_irqsave(&phba->hbalock, iflag);
7884                                 control = readl(phba->HCregaddr);
7885
7886                                 lpfc_debugfs_slow_ring_trc(phba,
7887                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
7888                                 control, status,
7889                                 (uint32_t)phba->sli.slistat.sli_intr);
7890
7891                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
7892                                         lpfc_debugfs_slow_ring_trc(phba,
7893                                                 "ISR Disable ring:"
7894                                                 "pwork:x%x hawork:x%x wait:x%x",
7895                                                 phba->work_ha, work_ha_copy,
7896                                                 (uint32_t)((unsigned long)
7897                                                 &phba->work_waitq));
7898
7899                                         control &=
7900                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
7901                                         writel(control, phba->HCregaddr);
7902                                         readl(phba->HCregaddr); /* flush */
7903                                 }
7904                                 else {
7905                                         lpfc_debugfs_slow_ring_trc(phba,
7906                                                 "ISR slow ring:   pwork:"
7907                                                 "x%x hawork:x%x wait:x%x",
7908                                                 phba->work_ha, work_ha_copy,
7909                                                 (uint32_t)((unsigned long)
7910                                                 &phba->work_waitq));
7911                                 }
7912                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
7913                         }
7914                 }
7915                 spin_lock_irqsave(&phba->hbalock, iflag);
7916                 if (work_ha_copy & HA_ERATT) {
7917                         lpfc_sli_read_hs(phba);
7918                         /*
7919                          * Check if a deferred error
7920                          * condition is active
7921                          */
7922                         if ((HS_FFER1 & phba->work_hs) &&
7923                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7924                                 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7925                                 phba->hba_flag |= DEFER_ERATT;
7926                                 /* Clear all interrupt enable conditions */
7927                                 writel(0, phba->HCregaddr);
7928                                 readl(phba->HCregaddr);
7929                         }
7930                 }
7931
7932                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
7933                         pmb = phba->sli.mbox_active;
7934                         pmbox = &pmb->u.mb;
7935                         mbox = phba->mbox;
7936                         vport = pmb->vport;
7937
7938                         /* First check out the status word */
7939                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
7940                         if (pmbox->mbxOwner != OWN_HOST) {
7941                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
7942                                 /*
7943                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
7944                                  * mbxStatus <status>
7945                                  */
7946                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
7947                                                 LOG_SLI,
7948                                                 "(%d):0304 Stray Mailbox "
7949                                                 "Interrupt mbxCommand x%x "
7950                                                 "mbxStatus x%x\n",
7951                                                 (vport ? vport->vpi : 0),
7952                                                 pmbox->mbxCommand,
7953                                                 pmbox->mbxStatus);
7954                                 /* clear mailbox attention bit */
7955                                 work_ha_copy &= ~HA_MBATT;
7956                         } else {
7957                                 phba->sli.mbox_active = NULL;
7958                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
7959                                 phba->last_completion_time = jiffies;
7960                                 del_timer(&phba->sli.mbox_tmo);
7961                                 if (pmb->mbox_cmpl) {
7962                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
7963                                                         MAILBOX_CMD_SIZE);
7964                                 }
7965                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
7966                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
7967
7968                                         lpfc_debugfs_disc_trc(vport,
7969                                                 LPFC_DISC_TRC_MBOX_VPORT,
7970                                                 "MBOX dflt rpi: : "
7971                                                 "status:x%x rpi:x%x",
7972                                                 (uint32_t)pmbox->mbxStatus,
7973                                                 pmbox->un.varWords[0], 0);
7974
7975                                         if (!pmbox->mbxStatus) {
7976                                                 mp = (struct lpfc_dmabuf *)
7977                                                         (pmb->context1);
7978                                                 ndlp = (struct lpfc_nodelist *)
7979                                                         pmb->context2;
7980
7981                                                 /* Reg_LOGIN of dflt RPI was
7982                                                  * successful. Now let's get
7983                                                  * rid of the RPI using the
7984                                                  * same mbox buffer.
7985                                                  */
7986                                                 lpfc_unreg_login(phba,
7987                                                         vport->vpi,
7988                                                         pmbox->un.varWords[0],
7989                                                         pmb);
7990                                                 pmb->mbox_cmpl =
7991                                                         lpfc_mbx_cmpl_dflt_rpi;
7992                                                 pmb->context1 = mp;
7993                                                 pmb->context2 = ndlp;
7994                                                 pmb->vport = vport;
7995                                                 rc = lpfc_sli_issue_mbox(phba,
7996                                                                 pmb,
7997                                                                 MBX_NOWAIT);
7998                                                 if (rc != MBX_BUSY)
7999                                                         lpfc_printf_log(phba,
8000                                                         KERN_ERR,
8001                                                         LOG_MBOX | LOG_SLI,
8002                                                         "0350 rc should have "
8003                                                         "been MBX_BUSY\n");
8004                                                 if (rc != MBX_NOT_FINISHED)
8005                                                         goto send_current_mbox;
8006                                         }
8007                                 }
8008                                 spin_lock_irqsave(
8009                                                 &phba->pport->work_port_lock,
8010                                                 iflag);
8011                                 phba->pport->work_port_events &=
8012                                         ~WORKER_MBOX_TMO;
8013                                 spin_unlock_irqrestore(
8014                                                 &phba->pport->work_port_lock,
8015                                                 iflag);
8016                                 lpfc_mbox_cmpl_put(phba, pmb);
8017                         }
8018                 } else
8019                         spin_unlock_irqrestore(&phba->hbalock, iflag);
8020
8021                 if ((work_ha_copy & HA_MBATT) &&
8022                     (phba->sli.mbox_active == NULL)) {
8023 send_current_mbox:
8024                         /* Process next mailbox command if there is one */
8025                         do {
8026                                 rc = lpfc_sli_issue_mbox(phba, NULL,
8027                                                          MBX_NOWAIT);
8028                         } while (rc == MBX_NOT_FINISHED);
8029                         if (rc != MBX_SUCCESS)
8030                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8031                                                 LOG_SLI, "0349 rc should be "
8032                                                 "MBX_SUCCESS\n");
8033                 }
8034
8035                 spin_lock_irqsave(&phba->hbalock, iflag);
8036                 phba->work_ha |= work_ha_copy;
8037                 spin_unlock_irqrestore(&phba->hbalock, iflag);
8038                 lpfc_worker_wake_up(phba);
8039         }
8040         return IRQ_HANDLED;
8041
8042 } /* lpfc_sli_sp_intr_handler */
8043
8044 /**
8045  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
8046  * @irq: Interrupt number.
8047  * @dev_id: The device context pointer.
8048  *
8049  * This function is directly called from the PCI layer as an interrupt
8050  * service routine when device with SLI-3 interface spec is enabled with
8051  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
8052  * ring event in the HBA. However, when the device is enabled with either
8053  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
8054  * device-level interrupt handler. When the PCI slot is in error recovery
8055  * or the HBA is undergoing initialization, the interrupt handler will not
8056  * process the interrupt. The SCSI FCP fast-path ring events are handled in
8057  * the interrupt context. This function is called without any lock held.
8058  * It gets the hbalock to access and update SLI data structures.
8059  *
8060  * This function returns IRQ_HANDLED when interrupt is handled else it
8061  * returns IRQ_NONE.
8062  **/
8063 irqreturn_t
8064 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
8065 {
8066         struct lpfc_hba  *phba;
8067         uint32_t ha_copy;
8068         unsigned long status;
8069         unsigned long iflag;
8070
8071         /* Get the driver's phba structure from the dev_id and
8072          * assume the HBA is not interrupting.
8073          */
8074         phba = (struct lpfc_hba *) dev_id;
8075
8076         if (unlikely(!phba))
8077                 return IRQ_NONE;
8078
8079         /*
8080          * Stuff needs to be attended to when this function is invoked as an
8081          * individual interrupt handler in MSI-X multi-message interrupt mode
8082          */
8083         if (phba->intr_type == MSIX) {
8084                 /* Check device state for handling interrupt */
8085                 if (lpfc_intr_state_check(phba))
8086                         return IRQ_NONE;
8087                 /* Need to read HA REG for FCP ring and other ring events */
8088                 ha_copy = readl(phba->HAregaddr);
8089                 /* Clear up only attention source related to fast-path */
8090                 spin_lock_irqsave(&phba->hbalock, iflag);
8091                 /* Clear only the attention sources related to the fast path */
8092                  * If there is deferred error attention, do not check for
8093                  * any interrupt.
8094                  */
8095                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8096                         spin_unlock_irqrestore(&phba->hbalock, iflag);
8097                         return IRQ_NONE;
8098                 }
8099                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
8100                         phba->HAregaddr);
8101                 readl(phba->HAregaddr); /* flush */
8102                 spin_unlock_irqrestore(&phba->hbalock, iflag);
8103         } else
8104                 ha_copy = phba->ha_copy;
8105
8106         /*
8107          * Process all events on FCP ring. Take the optimized path for FCP IO.
8108          */
8109         ha_copy &= ~(phba->work_ha_mask);
8110
8111         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
8112         status >>= (4*LPFC_FCP_RING);
8113         if (status & HA_RXMASK)
8114                 lpfc_sli_handle_fast_ring_event(phba,
8115                                                 &phba->sli.ring[LPFC_FCP_RING],
8116                                                 status);
8117
8118         if (phba->cfg_multi_ring_support == 2) {
8119                 /*
8120                  * Process all events on extra ring. Take the optimized path
8121                  * for extra ring IO.
8122                  */
8123                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
8124                 status >>= (4*LPFC_EXTRA_RING);
8125                 if (status & HA_RXMASK) {
8126                         lpfc_sli_handle_fast_ring_event(phba,
8127                                         &phba->sli.ring[LPFC_EXTRA_RING],
8128                                         status);
8129                 }
8130         }
8131         return IRQ_HANDLED;
8132 }  /* lpfc_sli_fp_intr_handler */
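
/*
 * Illustrative sketch, not driver code: the Host Attention register packs
 * one 4-bit attention nibble per ring, so both interrupt handlers select a
 * ring's events with the same shift-and-mask idiom:
 *
 *	static unsigned long ring_attention(unsigned int ha_copy, int ring)
 *	{
 *		// HA_RXMASK covers a single ring's 4-bit field
 *		return (ha_copy >> (4 * ring)) & HA_RXMASK;
 *	}
 *
 * With ring set to LPFC_FCP_RING this reproduces the status computation in
 * the fast-path handler above.
 */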
8133
8134 /**
8135  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
8136  * @irq: Interrupt number.
8137  * @dev_id: The device context pointer.
8138  *
8139  * This function is the HBA device-level interrupt handler to device with
8140  * SLI-3 interface spec, called from the PCI layer when either MSI or
8141  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
8142  * requires driver attention. This function invokes the slow-path interrupt
8143  * attention handling function and fast-path interrupt attention handling
8144  * function in turn to process the relevant HBA attention events. This
8145  * function is called without any lock held. It gets the hbalock to access
8146  * and update SLI data structures.
8147  *
8148  * This function returns IRQ_HANDLED when interrupt is handled, else it
8149  * returns IRQ_NONE.
8150  **/
8151 irqreturn_t
8152 lpfc_sli_intr_handler(int irq, void *dev_id)
8153 {
8154         struct lpfc_hba  *phba;
8155         irqreturn_t sp_irq_rc, fp_irq_rc;
8156         unsigned long status1, status2;
8157
8158         /*
8159          * Get the driver's phba structure from the dev_id and
8160          * assume the HBA is not interrupting.
8161          */
8162         phba = (struct lpfc_hba *) dev_id;
8163
8164         if (unlikely(!phba))
8165                 return IRQ_NONE;
8166
8167         /* Check device state for handling interrupt */
8168         if (lpfc_intr_state_check(phba))
8169                 return IRQ_NONE;
8170
8171         spin_lock(&phba->hbalock);
8172         phba->ha_copy = readl(phba->HAregaddr);
8173         if (unlikely(!phba->ha_copy)) {
8174                 spin_unlock(&phba->hbalock);
8175                 return IRQ_NONE;
8176         } else if (phba->ha_copy & HA_ERATT) {
8177                 if (phba->hba_flag & HBA_ERATT_HANDLED)
8178                         /* ERATT polling has handled ERATT */
8179                         phba->ha_copy &= ~HA_ERATT;
8180                 else
8181                         /* Indicate interrupt handler handles ERATT */
8182                         phba->hba_flag |= HBA_ERATT_HANDLED;
8183         }
8184
8185         /*
8186          * If there is deferred error attention, do not check for any interrupt.
8187          */
8188         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8189                 spin_unlock(&phba->hbalock);
8190                 return IRQ_NONE;
8191         }
8192
8193         /* Clear attention sources except link and error attentions */
8194         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
8195         readl(phba->HAregaddr); /* flush */
8196         spin_unlock(&phba->hbalock);
8197
8198         /*
8199          * Invokes slow-path host attention interrupt handling as appropriate.
8200          */
8201
8202         /* status of events with mailbox and link attention */
8203         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
8204
8205         /* status of events with ELS ring */
8206         status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
8207         status2 >>= (4*LPFC_ELS_RING);
8208
8209         if (status1 || (status2 & HA_RXMASK))
8210                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
8211         else
8212                 sp_irq_rc = IRQ_NONE;
8213
8214         /*
8215          * Invoke fast-path host attention interrupt handling as appropriate.
8216          */
8217
8218         /* status of events with FCP ring */
8219         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
8220         status1 >>= (4*LPFC_FCP_RING);
8221
8222         /* status of events with extra ring */
8223         if (phba->cfg_multi_ring_support == 2) {
8224                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
8225                 status2 >>= (4*LPFC_EXTRA_RING);
8226         } else
8227                 status2 = 0;
8228
8229         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
8230                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
8231         else
8232                 fp_irq_rc = IRQ_NONE;
8233
8234         /* Return device-level interrupt handling status */
8235         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
8236 }  /* lpfc_sli_intr_handler */
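
/*
 * Illustrative sketch, not driver code: in MSI or Pin-IRQ mode the
 * device-level handler above demultiplexes the single vector into the two
 * MSI-X style handlers and reports IRQ_HANDLED when either path did work:
 *
 *	sp_irq_rc = slow_path_pending ?
 *			lpfc_sli_sp_intr_handler(irq, dev_id) : IRQ_NONE;
 *	fp_irq_rc = fast_path_pending ?
 *			lpfc_sli_fp_intr_handler(irq, dev_id) : IRQ_NONE;
 *	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
 *
 * where slow_path_pending and fast_path_pending stand for the HA status
 * tests computed above.
 */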
8237
8238 /**
8239  * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
8240  * @phba: pointer to lpfc hba data structure.
8241  *
8242  * This routine is invoked by the worker thread to process all the pending
8243  * SLI4 FCP abort XRI events.
8244  **/
8245 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
8246 {
8247         struct lpfc_cq_event *cq_event;
8248
8249         /* First, declare the fcp xri abort event has been handled */
8250         spin_lock_irq(&phba->hbalock);
8251         phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
8252         spin_unlock_irq(&phba->hbalock);
8253         /* Now, handle all the fcp xri abort events */
8254         while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
8255                 /* Get the first event from the head of the event queue */
8256                 spin_lock_irq(&phba->hbalock);
8257                 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8258                                  cq_event, struct lpfc_cq_event, list);
8259                 spin_unlock_irq(&phba->hbalock);
8260                 /* Notify aborted XRI for FCP work queue */
8261                 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8262                 /* Free the event processed back to the free pool */
8263                 lpfc_sli4_cq_event_release(phba, cq_event);
8264         }
8265 }
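
/*
 * Design note with a sketch: both XRI-abort processors clear their hba_flag
 * bit before draining the list, so an event posted mid-drain re-triggers
 * the worker instead of being lost.  The generic shape, with locking
 * elided and hypothetical names:
 *
 *	lock();  flags &= ~EVENT_BIT;  unlock();
 *	while (!list_empty(&queue)) {
 *		lock();  item = remove_head(&queue);  unlock();
 *		handle(item);
 *		release(item);
 *	}
 */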
8266
8267 /**
8268  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
8269  * @phba: pointer to lpfc hba data structure.
8270  *
8271  * This routine is invoked by the worker thread to process all the pending
8272  * SLI4 els abort xri events.
8273  **/
8274 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8275 {
8276         struct lpfc_cq_event *cq_event;
8277
8278         /* First, declare the els xri abort event has been handled */
8279         spin_lock_irq(&phba->hbalock);
8280         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
8281         spin_unlock_irq(&phba->hbalock);
8282         /* Now, handle all the els xri abort events */
8283         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
8284                 /* Get the first event from the head of the event queue */
8285                 spin_lock_irq(&phba->hbalock);
8286                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8287                                  cq_event, struct lpfc_cq_event, list);
8288                 spin_unlock_irq(&phba->hbalock);
8289                 /* Notify aborted XRI for ELS work queue */
8290                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8291                 /* Free the event processed back to the free pool */
8292                 lpfc_sli4_cq_event_release(phba, cq_event);
8293         }
8294 }
8295
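/**
 * lpfc_sli4_iocb_param_transfer - Transfer a WCQE completion to a response iocb
 * @pIocbIn: Pointer to the response iocbq to be filled in.
 * @pIocbOut: Pointer to the command iocbq that completed.
 * @wcqe: Pointer to the work-queue completion queue entry.
 *
 * This routine copies the command iocbq from its iocb member onward into the
 * response iocbq and then maps the WCQE status and parameter words into the
 * response iocb fields that the completion handlers expect.
 **/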
8296 static void
8297 lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8298                               struct lpfc_iocbq *pIocbOut,
8299                               struct lpfc_wcqe_complete *wcqe)
8300 {
8301         size_t offset = offsetof(struct lpfc_iocbq, iocb);
8302
8303         memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8304                sizeof(struct lpfc_iocbq) - offset);
8305         /* Map WCQE parameters into irspiocb parameters */
8306         pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8307         if (pIocbOut->iocb_flag & LPFC_IO_FCP) {
8308                 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
8309                         pIocbIn->iocb.un.fcpi.fcpi_parm =
8310                                         pIocbOut->iocb.un.fcpi.fcpi_parm -
8311                                         wcqe->total_data_placed;
8312                 else
8313                         pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8314         } else {
8315                 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8316         }
8316 }
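
/*
 * Illustrative sketch, not driver code: the routine above uses offsetof()
 * to copy a structure from a given member onward, leaving the fields in
 * front of it (list head, tags) untouched.  The same idiom in miniature:
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *
 *	struct item { int keep; int copy_a; int copy_b; };
 *
 *	static void tail_copy(struct item *dst, const struct item *src)
 *	{
 *		size_t off = offsetof(struct item, copy_a);
 *
 *		memcpy((char *)dst + off, (const char *)src + off,
 *		       sizeof(struct item) - off);
 *	}
 */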
8317
8318 /**
8319  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
8320  * @phba: Pointer to HBA context object.
8321  * @irspiocbq: Pointer to the response IOCBQ carrying the ELS WCQE.
8322  *
8323  * This routine handles an ELS work-queue completion event and constructs
8324  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
8325  * discovery engine to handle.
8326  *
8327  * Return: Pointer to the response IOCBQ, NULL otherwise.
8328  **/
8329 static struct lpfc_iocbq *
8330 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
8331                                struct lpfc_iocbq *irspiocbq)
8332 {
8333         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8334         struct lpfc_iocbq *cmdiocbq;
8335         struct lpfc_wcqe_complete *wcqe;
8336         unsigned long iflags;
8337
8338         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
8339         spin_lock_irqsave(&phba->hbalock, iflags);
8340         pring->stats.iocb_event++;
8341         /* Look up the ELS command IOCB and create pseudo response IOCB */
8342         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8343                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8344         spin_unlock_irqrestore(&phba->hbalock, iflags);
8345
8346         if (unlikely(!cmdiocbq)) {
8347                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8348                                 "0386 ELS complete with no corresponding "
8349                                 "cmdiocb: iotag (%d)\n",
8350                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8351                 lpfc_sli_release_iocbq(phba, irspiocbq);
8352                 return NULL;
8353         }
8354
8355         /* Fake the irspiocbq and copy necessary response information */
8356         lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8357
8358         return irspiocbq;
8359 }
8360
8361 /**
8362  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
8363  * @phba: Pointer to HBA context object.
8364  * @mcqe: Pointer to mailbox completion queue entry.
8365  *
8366  * This routine processes a mailbox completion queue entry with an
8367  * asynchronous event.
8368  *
8369  * Return: true if work posted to worker thread, otherwise false.
8370  **/
8371 static bool
8372 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8373 {
8374         struct lpfc_cq_event *cq_event;
8375         unsigned long iflags;
8376
8377         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8378                         "0392 Async Event: word0:x%x, word1:x%x, "
8379                         "word2:x%x, word3:x%x\n", mcqe->word0,
8380                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
8381
8382         /* Allocate a new internal CQ_EVENT entry */
8383         cq_event = lpfc_sli4_cq_event_alloc(phba);
8384         if (!cq_event) {
8385                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8386                                 "0394 Failed to allocate CQ_EVENT entry\n");
8387                 return false;
8388         }
8389
8390         /* Move the CQE into an asynchronous event entry */
8391         memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
8392         spin_lock_irqsave(&phba->hbalock, iflags);
8393         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
8394         /* Set the async event flag */
8395         phba->hba_flag |= ASYNC_EVENT;
8396         spin_unlock_irqrestore(&phba->hbalock, iflags);
8397
8398         return true;
8399 }
8400
8401 /**
8402  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
8403  * @phba: Pointer to HBA context object.
8404  * @mcqe: Pointer to mailbox completion queue entry.
8405  *
8406  * This routine processes a mailbox completion queue entry with a mailbox
8407  * completion event.
8408  *
8409  * Return: true if work posted to worker thread, otherwise false.
8410  **/
8411 static bool
8412 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8413 {
8414         uint32_t mcqe_status;
8415         MAILBOX_t *mbox, *pmbox;
8416         struct lpfc_mqe *mqe;
8417         struct lpfc_vport *vport;
8418         struct lpfc_nodelist *ndlp;
8419         struct lpfc_dmabuf *mp;
8420         unsigned long iflags;
8421         LPFC_MBOXQ_t *pmb;
8422         bool workposted = false;
8423         int rc;
8424
8425         /* If not a mailbox complete MCQE, bail out via the consumed check */
8426         if (!bf_get(lpfc_trailer_completed, mcqe))
8427                 goto out_no_mqe_complete;
8428
8429         /* Get the reference to the active mbox command */
8430         spin_lock_irqsave(&phba->hbalock, iflags);
8431         pmb = phba->sli.mbox_active;
8432         if (unlikely(!pmb)) {
8433                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
8434                                 "1832 No pending MBOX command to handle\n");
8435                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8436                 goto out_no_mqe_complete;
8437         }
8438         spin_unlock_irqrestore(&phba->hbalock, iflags);
8439         mqe = &pmb->u.mqe;
8440         pmbox = (MAILBOX_t *)&pmb->u.mqe;
8441         mbox = phba->mbox;
8442         vport = pmb->vport;
8443
8444         /* Reset heartbeat timer */
8445         phba->last_completion_time = jiffies;
8446         del_timer(&phba->sli.mbox_tmo);
8447
8448         /* Move mbox data to caller's mailbox region, do endian swapping */
8449         if (pmb->mbox_cmpl && mbox)
8450                 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
8451         /* Set the mailbox status with SLI4 range 0x4000 */
8452         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
8453         if (mcqe_status != MB_CQE_STATUS_SUCCESS)
8454                 bf_set(lpfc_mqe_status, mqe,
8455                        (LPFC_MBX_ERROR_RANGE | mcqe_status));
8456
8457         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8458                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8459                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
8460                                       "MBOX dflt rpi: status:x%x rpi:x%x",
8461                                       mcqe_status,
8462                                       pmbox->un.varWords[0], 0);
8463                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
8464                         mp = (struct lpfc_dmabuf *)(pmb->context1);
8465                         ndlp = (struct lpfc_nodelist *)pmb->context2;
8466                         /* Reg_LOGIN of dflt RPI was successful. Now let's
8467                          * get rid of the RPI using the same mbox buffer.
8468                          */
8469                         lpfc_unreg_login(phba, vport->vpi,
8470                                          pmbox->un.varWords[0], pmb);
8471                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
8472                         pmb->context1 = mp;
8473                         pmb->context2 = ndlp;
8474                         pmb->vport = vport;
8475                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
8476                         if (rc != MBX_BUSY)
8477                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8478                                                 LOG_SLI, "0385 rc should "
8479                                                 "have been MBX_BUSY\n");
8480                         if (rc != MBX_NOT_FINISHED)
8481                                 goto send_current_mbox;
8482                 }
8483         }
8484         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8485         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8486         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8487
8488         /* There is mailbox completion work to do */
8489         spin_lock_irqsave(&phba->hbalock, iflags);
8490         __lpfc_mbox_cmpl_put(phba, pmb);
8491         phba->work_ha |= HA_MBATT;
8492         spin_unlock_irqrestore(&phba->hbalock, iflags);
8493         workposted = true;
8494
8495 send_current_mbox:
8496         spin_lock_irqsave(&phba->hbalock, iflags);
8497         /* Release the mailbox command posting token */
8498         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8499         /* Setting the active mailbox pointer needs to be in sync with the flag clear */
8500         phba->sli.mbox_active = NULL;
8501         spin_unlock_irqrestore(&phba->hbalock, iflags);
8502         /* Wake up worker thread to post the next pending mailbox command */
8503         lpfc_worker_wake_up(phba);
8504 out_no_mqe_complete:
8505         if (bf_get(lpfc_trailer_consumed, mcqe))
8506                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
8507         return workposted;
8508 }
8509
8510 /**
8511  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
8512  * @phba: Pointer to HBA context object.
8513  * @cqe: Pointer to mailbox completion queue entry.
8514  *
8515  * This routine processes a mailbox completion queue entry; it invokes the
8516  * proper mailbox completion handling or asynchronous event handling routine
8517  * according to the MCQE's async bit.
8518  *
8519  * Return: true if work posted to worker thread, otherwise false.
8520  **/
8521 static bool
8522 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8523 {
8524         struct lpfc_mcqe mcqe;
8525         bool workposted;
8526
8527         /* Copy the mailbox MCQE and convert endian order as needed */
8528         lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
8529
8530         /* Invoke the proper event handling routine */
8531         if (!bf_get(lpfc_trailer_async, &mcqe))
8532                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
8533         else
8534                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
8535         return workposted;
8536 }
8537
8538 /**
8539  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
8540  * @phba: Pointer to HBA context object.
8541  * @wcqe: Pointer to work-queue completion queue entry.
8542  *
8543  * This routine handles an ELS work-queue completion event.
8544  *
8545  * Return: true if work posted to worker thread, otherwise false.
8546  **/
8547 static bool
8548 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8549                              struct lpfc_wcqe_complete *wcqe)
8550 {
8551         struct lpfc_iocbq *irspiocbq;
8552         unsigned long iflags;
8553
8554         /* Get an irspiocbq for later ELS response processing use */
8555         irspiocbq = lpfc_sli_get_iocbq(phba);
8556         if (!irspiocbq) {
8557                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8558                                 "0387 Failed to allocate an iocbq\n");
8559                 return false;
8560         }
8561
8562         /* Save off the slow-path queue event for work thread to process */
8563         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
8564         spin_lock_irqsave(&phba->hbalock, iflags);
8565         list_add_tail(&irspiocbq->cq_event.list,
8566                       &phba->sli4_hba.sp_queue_event);
8567         phba->hba_flag |= HBA_SP_QUEUE_EVT;
8568         spin_unlock_irqrestore(&phba->hbalock, iflags);
8569
8570         return true;
8571 }
8572
8573 /**
8574  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
8575  * @phba: Pointer to HBA context object.
8576  * @wcqe: Pointer to work-queue completion queue entry.
8577  *
8578  * This routine handles a slow-path WQ entry consumed event by invoking the
8579  * proper WQ release routine to the slow-path WQ.
8580  **/
8581 static void
8582 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8583                              struct lpfc_wcqe_release *wcqe)
8584 {
8585         /* Check for the slow-path ELS work queue */
8586         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8587                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8588                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8589         else
8590                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8591                                 "2579 Slow-path wqe consume event carries "
8592                                 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8593                                 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
8594                                 phba->sli4_hba.els_wq->queue_id);
8595 }
8596
8597 /**
8598  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
8599  * @phba: Pointer to HBA context object.
8600  * @cq: Pointer to a WQ completion queue.
8601  * @wcqe: Pointer to work-queue completion queue entry.
8602  *
8603  * This routine handles an XRI abort event.
8604  *
8605  * Return: true if work posted to worker thread, otherwise false.
8606  **/
8607 static bool
8608 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8609                                    struct lpfc_queue *cq,
8610                                    struct sli4_wcqe_xri_aborted *wcqe)
8611 {
8612         bool workposted = false;
8613         struct lpfc_cq_event *cq_event;
8614         unsigned long iflags;
8615
8616         /* Allocate a new internal CQ_EVENT entry */
8617         cq_event = lpfc_sli4_cq_event_alloc(phba);
8618         if (!cq_event) {
8619                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8620                                 "0602 Failed to allocate CQ_EVENT entry\n");
8621                 return false;
8622         }
8623
8624         /* Move the CQE into the proper xri abort event list */
8625         memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
8626         switch (cq->subtype) {
8627         case LPFC_FCP:
8628                 spin_lock_irqsave(&phba->hbalock, iflags);
8629                 list_add_tail(&cq_event->list,
8630                               &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
8631                 /* Set the fcp xri abort event flag */
8632                 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
8633                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8634                 workposted = true;
8635                 break;
8636         case LPFC_ELS:
8637                 spin_lock_irqsave(&phba->hbalock, iflags);
8638                 list_add_tail(&cq_event->list,
8639                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
8640                 /* Set the els xri abort event flag */
8641                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
8642                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8643                 workposted = true;
8644                 break;
8645         default:
8646                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8647                                 "0603 Invalid work queue CQE subtype (x%x)\n",
8648                                 cq->subtype);
8649                 workposted = false;
8650                 break;
8651         }
8652         return workposted;
8653 }
8654
8655 /**
8656  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8657  * @phba: Pointer to HBA context object.
8658  * @rcqe: Pointer to receive-queue completion queue entry.
8659  *
8660  * This routine processes a receive-queue completion queue entry.
8661  *
8662  * Return: true if work posted to worker thread, otherwise false.
8663  **/
8664 static bool
8665 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
8666 {
8667         bool workposted = false;
8668         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8669         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
8670         struct hbq_dmabuf *dma_buf;
8671         uint32_t status;
8672         unsigned long iflags;
8673
8674         lpfc_sli4_rq_release(hrq, drq);
8675         if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
8676                 goto out;
8677
8678         status = bf_get(lpfc_rcqe_status, rcqe);
8679         switch (status) {
8680         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8681                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8682                                 "2537 Receive Frame Truncated!!\n");
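                /* Fall through to post the truncated frame to the worker thread */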
8683         case FC_STATUS_RQ_SUCCESS:
8684                 spin_lock_irqsave(&phba->hbalock, iflags);
8685                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8686                 if (!dma_buf) {
8687                         spin_unlock_irqrestore(&phba->hbalock, iflags);
8688                         goto out;
8689                 }
8690                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
8691                 /* save off the frame for the worker thread to process */
8692                 list_add_tail(&dma_buf->cq_event.list,
8693                               &phba->sli4_hba.sp_queue_event);
8694                 /* Frame received */
8695                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8696                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8697                 workposted = true;
8698                 break;
8699         case FC_STATUS_INSUFF_BUF_NEED_BUF:
8700         case FC_STATUS_INSUFF_BUF_FRM_DISC:
8701                 /* Post more buffers if possible */
8702                 spin_lock_irqsave(&phba->hbalock, iflags);
8703                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
8704                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8705                 workposted = true;
8706                 break;
8707         }
8708 out:
8709         return workposted;
8710 }
8711
8712 /**
8713  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
8714  * @phba: Pointer to HBA context object.
8715  * @cq: Pointer to the completion queue.
8716  * @wcqe: Pointer to a completion queue entry.
8717  *
8718  * This routine processes a slow-path work-queue or receive-queue completion
8719  * queue entry.
8720  *
8721  * Return: true if work posted to worker thread, otherwise false.
8722  **/
8723 static bool
8724 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8725                          struct lpfc_cqe *cqe)
8726 {
8727         struct lpfc_cqe cqevt;
8728         bool workposted = false;
8729
8730         /* Copy the work queue CQE and convert endian order if needed */
8731         lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
8732
8733         /* Check and process for different type of WCQE and dispatch */
8734         switch (bf_get(lpfc_cqe_code, &cqevt)) {
8735         case CQE_CODE_COMPL_WQE:
8736                 /* Process the WQ/RQ complete event */
8737                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8738                                 (struct lpfc_wcqe_complete *)&cqevt);
8739                 break;
8740         case CQE_CODE_RELEASE_WQE:
8741                 /* Process the WQ release event */
8742                 lpfc_sli4_sp_handle_rel_wcqe(phba,
8743                                 (struct lpfc_wcqe_release *)&cqevt);
8744                 break;
8745         case CQE_CODE_XRI_ABORTED:
8746                 /* Process the WQ XRI abort event */
8747                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8748                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
8749                 break;
8750         case CQE_CODE_RECEIVE:
8751                 /* Process the RQ event */
8752                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
8753                                 (struct lpfc_rcqe *)&cqevt);
8754                 break;
8755         default:
8756                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8757                                 "0388 Not a valid WCQE code: x%x\n",
8758                                 bf_get(lpfc_cqe_code, &cqevt));
8759                 break;
8760         }
8761         return workposted;
8762 }
8763
8764 /**
8765  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
8766  * @phba: Pointer to HBA context object.
8767  * @eqe: Pointer to slow-path event queue entry.
8768  *
8769  * This routine processes an event queue entry from the slow-path event
8770  * queue. It checks the MajorCode and MinorCode to determine whether this
8771  * is a completion event on a completion queue; if not, an error is logged
8772  * and the routine just returns. Otherwise, it finds the corresponding
8773  * completion queue, processes all the entries on that completion queue,
8774  * re-arms the completion queue, and then returns.
8775  *
8776  **/
8777 static void
8778 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8779 {
8780         struct lpfc_queue *cq = NULL, *childq, *speq;
8781         struct lpfc_cqe *cqe;
8782         bool workposted = false;
8783         int ecount = 0;
8784         uint16_t cqid;
8785
8786         if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
8787             bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8788                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8789                                 "0359 Not a valid slow-path completion "
8790                                 "event: majorcode=x%x, minorcode=x%x\n",
8791                                 bf_get(lpfc_eqe_major_code, eqe),
8792                                 bf_get(lpfc_eqe_minor_code, eqe));
8793                 return;
8794         }
8795
8796         /* Get the reference to the corresponding CQ */
8797         cqid = bf_get(lpfc_eqe_resource_id, eqe);
8798
8799         /* Search for completion queue pointer matching this cqid */
8800         speq = phba->sli4_hba.sp_eq;
8801         list_for_each_entry(childq, &speq->child_list, list) {
8802                 if (childq->queue_id == cqid) {
8803                         cq = childq;
8804                         break;
8805                 }
8806         }
8807         if (unlikely(!cq)) {
8808                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8809                                 "0365 Slow-path CQ identifier (%d) does "
8810                                 "not exist\n", cqid);
8811                 return;
8812         }
8813
8814         /* Process all the entries to the CQ */
8815         switch (cq->type) {
8816         case LPFC_MCQ:
8817                 while ((cqe = lpfc_sli4_cq_get(cq))) {
8818                         workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
8819                         if (!(++ecount % LPFC_GET_QE_REL_INT))
8820                                 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8821                 }
8822                 break;
8823         case LPFC_WCQ:
8824                 while ((cqe = lpfc_sli4_cq_get(cq))) {
8825                         workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
8826                         if (!(++ecount % LPFC_GET_QE_REL_INT))
8827                                 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8828                 }
8829                 break;
8830         default:
8831                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8832                                 "0370 Invalid completion queue type (%d)\n",
8833                                 cq->type);
8834                 return;
8835         }
8836
8837         /* Catch the no cq entry condition, log an error */
8838         if (unlikely(ecount == 0))
8839                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8840                                 "0371 No entry from the CQ: identifier "
8841                                 "(x%x), type (%d)\n", cq->queue_id, cq->type);
8842
8843         /* In any case, flush and re-arm the CQ */
8844         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8845
8846         /* wake up worker thread if there are works to be done */
8847         if (workposted)
8848                 lpfc_worker_wake_up(phba);
8849 }
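
/*
 * Design note with a sketch: the CQ polling loops above hand consumed
 * entries back to the hardware in batches of LPFC_GET_QE_REL_INT without
 * re-arming, then finish with one releasing call that re-arms the CQ:
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		workposted |= handle(cqe);	// per-CQE-type dispatch
 *		if (!(++ecount % LPFC_GET_QE_REL_INT))
 *			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 *	}
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 */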
8850
8851 /**
8852  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
8853  * @phba: Pointer to HBA context object.
8854  * @wcqe: Pointer to work-queue completion queue entry.
8855  *
8856  * This routine processes a fast-path work queue completion entry from a
8857  * fast-path event queue for FCP command response completion.
8857  **/
8858 static void
8859 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8860                              struct lpfc_wcqe_complete *wcqe)
8861 {
8862         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8863         struct lpfc_iocbq *cmdiocbq;
8864         struct lpfc_iocbq irspiocbq;
8865         unsigned long iflags;
8866
8867         spin_lock_irqsave(&phba->hbalock, iflags);
8868         pring->stats.iocb_event++;
8869         spin_unlock_irqrestore(&phba->hbalock, iflags);
8870
8871         /* Check for response status */
8872         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
8873                 /* If resource errors reported from HBA, reduce queue
8874                  * depth of the SCSI device.
8875                  */
8876                 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
8877                      IOSTAT_LOCAL_REJECT) &&
8878                     (wcqe->parameter == IOERR_NO_RESOURCES)) {
8879                         phba->lpfc_rampdown_queue_depth(phba);
8880                 }
8881                 /* Log the error status */
8882                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8883                                 "0373 FCP complete error: status=x%x, "
8884                                 "hw_status=x%x, total_data_specified=%d, "
8885                                 "parameter=x%x, word3=x%x\n",
8886                                 bf_get(lpfc_wcqe_c_status, wcqe),
8887                                 bf_get(lpfc_wcqe_c_hw_status, wcqe),
8888                                 wcqe->total_data_placed, wcqe->parameter,
8889                                 wcqe->word3);
8890         }
8891
8892         /* Look up the FCP command IOCB and create pseudo response IOCB */
8893         spin_lock_irqsave(&phba->hbalock, iflags);
8894         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8895                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8896         spin_unlock_irqrestore(&phba->hbalock, iflags);
8897         if (unlikely(!cmdiocbq)) {
8898                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8899                                 "0374 FCP complete with no corresponding "
8900                                 "cmdiocb: iotag (%d)\n",
8901                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8902                 return;
8903         }
8904         if (unlikely(!cmdiocbq->iocb_cmpl)) {
8905                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8906                                 "0375 FCP cmdiocb has no callback function, "
8907                                 "iotag: (%d)\n",
8908                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8909                 return;
8910         }
8911
8912         /* Fake the irspiocb and copy necessary response information */
8913         lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
8914
8915         /* Pass the cmd_iocb and the rsp state to the upper layer */
8916         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
8917 }
8918
8919 /**
8920  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
8921  * @phba: Pointer to HBA context object.
8922  * @cq: Pointer to completion queue.
8923  * @wcqe: Pointer to work-queue completion queue entry.
8924  *
8925  * This routine handles a fast-path WQ entry consumed event by invoking the
8926  * WQ release routine on the matching fast-path work queue.
8927  **/
8928 static void
8929 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8930                              struct lpfc_wcqe_release *wcqe)
8931 {
8932         struct lpfc_queue *childwq;
8933         bool wqid_matched = false;
8934         uint16_t fcp_wqid;
8935
8936         /* Check for fast-path FCP work queue release */
8937         fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
8938         list_for_each_entry(childwq, &cq->child_list, list) {
8939                 if (childwq->queue_id == fcp_wqid) {
8940                         lpfc_sli4_wq_release(childwq,
8941                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8942                         wqid_matched = true;
8943                         break;
8944                 }
8945         }
8946         /* Log a warning if no matching work queue was found */
8947         if (!wqid_matched)
8948                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8949                                 "2580 Fast-path wqe consume event carries "
8950                                 "mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
8951 }
8952
8953 /**
8954  * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
8955  * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
8956  * @cqe: Pointer to fast-path completion queue entry.
8957  *
8958  * This routine processes a fast-path work queue completion entry from a
8959  * fast-path completion queue and dispatches it to the proper WCQE handler.
8960  **/
8961 static int
8962 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8963                          struct lpfc_cqe *cqe)
8964 {
8965         struct lpfc_wcqe_release wcqe;
8966         bool workposted = false;
8967
8968         /* Copy the work queue CQE and convert endian order if needed */
8969         lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8970
8971         /* Check and process for different type of WCQE and dispatch */
8972         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8973         case CQE_CODE_COMPL_WQE:
8974                 /* Process the WQ complete event */
8975                 lpfc_sli4_fp_handle_fcp_wcqe(phba,
8976                                 (struct lpfc_wcqe_complete *)&wcqe);
8977                 break;
8978         case CQE_CODE_RELEASE_WQE:
8979                 /* Process the WQ release event */
8980                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
8981                                 (struct lpfc_wcqe_release *)&wcqe);
8982                 break;
8983         case CQE_CODE_XRI_ABORTED:
8984                 /* Process the WQ XRI abort event */
8985                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8986                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
8987                 break;
8988         default:
8989                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8990                                 "0144 Not a valid WCQE code: x%x\n",
8991                                 bf_get(lpfc_wcqe_c_code, &wcqe));
8992                 break;
8993         }
8994         return workposted;
8995 }
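
/*
 * Illustrative dispatch summary for the routine above (comment only):
 *
 *	lpfc_wcqe_c_code		handler
 *	----------------		-------
 *	CQE_CODE_COMPL_WQE	->	lpfc_sli4_fp_handle_fcp_wcqe()
 *	CQE_CODE_RELEASE_WQE	->	lpfc_sli4_fp_handle_rel_wcqe()
 *	CQE_CODE_XRI_ABORTED	->	lpfc_sli4_sp_handle_abort_xri_wcqe()
 *
 * Only the XRI-abort case can defer work to the worker thread, which is
 * why it alone contributes to the workposted return value.
 */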
8996
8997 /**
8998  * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
8999  * @phba: Pointer to HBA context object.
9000  * @eqe: Pointer to fast-path event queue entry.
 * @fcp_cqidx: Index of the fast-path FCP completion queue.
9001  *
9002  * This routine processes an event queue entry from the fast-path event
9003  * queue. It checks the MajorCode and MinorCode to determine whether this
9004  * is a completion event on a completion queue; if not, an error is logged
9005  * and the routine returns. Otherwise, it processes all the entries on the
9006  * corresponding completion queue, rearms the completion queue, and then
9007  * returns.
9008  **/
9009 static void
9010 lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9011                         uint32_t fcp_cqidx)
9012 {
9013         struct lpfc_queue *cq;
9014         struct lpfc_cqe *cqe;
9015         bool workposted = false;
9016         uint16_t cqid;
9017         int ecount = 0;
9018
9019         if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
9020             unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
9021                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9022                                 "0366 Not a valid fast-path completion "
9023                                 "event: majorcode=x%x, minorcode=x%x\n",
9024                                 bf_get(lpfc_eqe_major_code, eqe),
9025                                 bf_get(lpfc_eqe_minor_code, eqe));
9026                 return;
9027         }
9028
9029         cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
9030         if (unlikely(!cq)) {
9031                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9032                                 "0367 Fast-path completion queue does not "
9033                                 "exist\n");
9034                 return;
9035         }
9036
9037         /* Get the reference to the corresponding CQ */
9038         cqid = bf_get(lpfc_eqe_resource_id, eqe);
9039         if (unlikely(cqid != cq->queue_id)) {
9040                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9041                                 "0368 Miss-matched fast-path completion "
9042                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
9043                                 cqid, cq->queue_id);
9044                 return;
9045         }
9046
9047         /* Process all the entries on the CQ */
9048         while ((cqe = lpfc_sli4_cq_get(cq))) {
9049                 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
9050                 if (!(++ecount % LPFC_GET_QE_REL_INT))
9051                         lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
9052         }
9053
9054         /* Catch the no cq entry condition */
9055         if (unlikely(ecount == 0))
9056                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9057                                 "0369 No entry from fast-path completion "
9058                                 "queue fcpcqid=%d\n", cq->queue_id);
9059
9060         /* In any case, flush and re-arm the CQ */
9061         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
9062
9063         /* wake up worker thread if there is work to be done */
9064         if (workposted)
9065                 lpfc_worker_wake_up(phba);
9066 }
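
/*
 * Note on the release pattern above (illustrative): releasing with
 * LPFC_QUEUE_NOARM inside the loop hands consumed CQEs back to the port
 * without enabling another interrupt:
 *
 *	if (!(++ecount % LPFC_GET_QE_REL_INT))
 *		lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 *
 * while the single LPFC_QUEUE_REARM release at the end both frees the
 * remaining entries and re-enables the CQ interrupt in one doorbell
 * write.
 */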
9067
9068 static void
9069 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
9070 {
9071         struct lpfc_eqe *eqe;
9072
9073         /* walk all the EQ entries and drop on the floor */
9074         while ((eqe = lpfc_sli4_eq_get(eq)))
9075                 ;
9076
9077         /* Clear and re-arm the EQ */
9078         lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
9079 }
9080
9081 /**
9082  * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
9083  * @irq: Interrupt number.
9084  * @dev_id: The device context pointer.
9085  *
9086  * This function is directly called from the PCI layer as an interrupt
9087  * service routine when device with SLI-4 interface spec is enabled with
9088  * MSI-X multi-message interrupt mode and there are slow-path events in
9089  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
9090  * interrupt mode, this function is called as part of the device-level
9091  * interrupt handler. When the PCI slot is in error recovery or the HBA is
9092  * undergoing initialization, the interrupt handler will not process the
9093  * interrupt. The link attention and ELS ring attention events are handled
9094  * by the worker thread. The interrupt handler signals the worker thread
9095  * and returns for these events. This function is called without any lock
9096  * held. It gets the hbalock to access and update SLI data structures.
9097  *
9098  * This function returns IRQ_HANDLED when interrupt is handled else it
9099  * returns IRQ_NONE.
9100  **/
9101 irqreturn_t
9102 lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
9103 {
9104         struct lpfc_hba *phba;
9105         struct lpfc_queue *speq;
9106         struct lpfc_eqe *eqe;
9107         unsigned long iflag;
9108         int ecount = 0;
9109
9110         /*
9111          * Get the driver's phba structure from the dev_id
9112          */
9113         phba = (struct lpfc_hba *)dev_id;
9114
9115         if (unlikely(!phba))
9116                 return IRQ_NONE;
9117
9118         /* Get to the EQ struct associated with this vector */
9119         speq = phba->sli4_hba.sp_eq;
9120
9121         /* Check device state for handling interrupt */
9122         if (unlikely(lpfc_intr_state_check(phba))) {
9123                 /* Check again for link_state with lock held */
9124                 spin_lock_irqsave(&phba->hbalock, iflag);
9125                 if (phba->link_state < LPFC_LINK_DOWN)
9126                         /* Flush, clear interrupt, and rearm the EQ */
9127                         lpfc_sli4_eq_flush(phba, speq);
9128                 spin_unlock_irqrestore(&phba->hbalock, iflag);
9129                 return IRQ_NONE;
9130         }
9131
9132         /*
9133          * Process all the events on the slow-path EQ
9134          */
9135         while ((eqe = lpfc_sli4_eq_get(speq))) {
9136                 lpfc_sli4_sp_handle_eqe(phba, eqe);
9137                 if (!(++ecount % LPFC_GET_QE_REL_INT))
9138                         lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
9139         }
9140
9141         /* Always clear and re-arm the slow-path EQ */
9142         lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
9143
9144         /* Catch the no EQ entry condition */
9145         if (unlikely(ecount == 0)) {
9146                 if (phba->intr_type == MSIX)
9147                         /* Dedicated MSI-X vector: log the unexpected empty EQ */
9148                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9149                                         "0357 MSI-X interrupt with no EQE\n");
9150                 else
9151                         /* Shared interrupt line: no EQE means not our interrupt */
9152                         return IRQ_NONE;
9153         }
9154
9155         return IRQ_HANDLED;
9156 } /* lpfc_sli4_sp_intr_handler */
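
/*
 * A minimal sketch of how this handler could be wired up as the MSI-X
 * slow-path vector (illustrative only; the vector variable and the name
 * string are hypothetical, not taken from this file):
 *
 *	rc = request_irq(vector, lpfc_sli4_sp_intr_handler, 0,
 *			 "lpfc:sp", phba);
 *
 * The bare phba pointer is passed as dev_id, matching the cast performed
 * at the top of the handler.
 */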
9157
9158 /**
9159  * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
9160  * @irq: Interrupt number.
9161  * @dev_id: The device context pointer.
9162  *
9163  * This function is directly called from the PCI layer as an interrupt
9164  * service routine when device with SLI-4 interface spec is enabled with
9165  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
9166  * ring event in the HBA. However, when the device is enabled with either
9167  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
9168  * device-level interrupt handler. When the PCI slot is in error recovery
9169  * or the HBA is undergoing initialization, the interrupt handler will not
9170  * process the interrupt. The SCSI FCP fast-path ring events are handled in
9171  * the interrupt context. This function is called without any lock held.
9172  * It gets the hbalock to access and update SLI data structures. Note that
9173  * the FCP EQs map one-to-one onto the FCP CQs, so the FCP EQ index is
9174  * equal to the FCP CQ index.
9175  *
9176  * This function returns IRQ_HANDLED when interrupt is handled else it
9177  * returns IRQ_NONE.
9178  **/
9179 irqreturn_t
9180 lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
9181 {
9182         struct lpfc_hba *phba;
9183         struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9184         struct lpfc_queue *fpeq;
9185         struct lpfc_eqe *eqe;
9186         unsigned long iflag;
9187         int ecount = 0;
9188         uint32_t fcp_eqidx;
9189
9190         /* Get the FCP EQ handle and, from it, the driver's phba structure */
9191         fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
9192         phba = fcp_eq_hdl->phba;
9193         fcp_eqidx = fcp_eq_hdl->idx;
9194
9195         if (unlikely(!phba))
9196                 return IRQ_NONE;
9197
9198         /* Get to the EQ struct associated with this vector */
9199         fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
9200
9201         /* Check device state for handling interrupt */
9202         if (unlikely(lpfc_intr_state_check(phba))) {
9203                 /* Check again for link_state with lock held */
9204                 spin_lock_irqsave(&phba->hbalock, iflag);
9205                 if (phba->link_state < LPFC_LINK_DOWN)
9206                         /* Flush, clear interrupt, and rearm the EQ */
9207                         lpfc_sli4_eq_flush(phba, fpeq);
9208                 spin_unlock_irqrestore(&phba->hbalock, iflag);
9209                 return IRQ_NONE;
9210         }
9211
9212         /*
9213          * Process all the events on the FCP fast-path EQ
9214          */
9215         while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9216                 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
9217                 if (!(++ecount % LPFC_GET_QE_REL_INT))
9218                         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
9219         }
9220
9221         /* Always clear and re-arm the fast-path EQ */
9222         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
9223
9224         if (unlikely(ecount == 0)) {
9225                 if (phba->intr_type == MSIX)
9226                         /* Dedicated MSI-X vector: log the unexpected empty EQ */
9227                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9228                                         "0358 MSI-X interrupt with no EQE\n");
9229                 else
9230                         /* Shared interrupt line: no EQE means not our interrupt */
9231                         return IRQ_NONE;
9232         }
9233
9234         return IRQ_HANDLED;
9235 } /* lpfc_sli4_fp_intr_handler */
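
/*
 * Each fast-path vector needs its own dev_id so the handler can recover
 * both the HBA and the EQ index; a hypothetical wiring sketch (vector,
 * idx and the name string are illustrative, not from this file):
 *
 *	phba->sli4_hba.fcp_eq_hdl[idx].idx = idx;
 *	phba->sli4_hba.fcp_eq_hdl[idx].phba = phba;
 *	rc = request_irq(vector, lpfc_sli4_fp_intr_handler, 0,
 *			 "lpfc:fp", &phba->sli4_hba.fcp_eq_hdl[idx]);
 */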
9236
9237 /**
9238  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
9239  * @irq: Interrupt number.
9240  * @dev_id: The device context pointer.
9241  *
9242  * This function is the device-level interrupt handler to device with SLI-4
9243  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
9244  * interrupt mode is enabled and there is an event in the HBA which requires
9245  * driver attention. This function invokes the slow-path interrupt attention
9246  * handling function and fast-path interrupt attention handling function in
9247  * turn to process the relevant HBA attention events. This function is called
9248  * without any lock held. It gets the hbalock to access and update SLI data
9249  * structures.
9250  *
9251  * This function returns IRQ_HANDLED when interrupt is handled, else it
9252  * returns IRQ_NONE.
9253  **/
9254 irqreturn_t
9255 lpfc_sli4_intr_handler(int irq, void *dev_id)
9256 {
9257         struct lpfc_hba  *phba;
9258         irqreturn_t sp_irq_rc, fp_irq_rc;
9259         bool fp_handled = false;
9260         uint32_t fcp_eqidx;
9261
9262         /* Get the driver's phba structure from the dev_id */
9263         phba = (struct lpfc_hba *)dev_id;
9264
9265         if (unlikely(!phba))
9266                 return IRQ_NONE;
9267
9268         /*
9269          * Invokes slow-path host attention interrupt handling as appropriate.
9270          */
9271         sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
9272
9273         /*
9274          * Invoke fast-path host attention interrupt handling as appropriate.
9275          */
9276         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
9277                 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
9278                                         &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
9279                 if (fp_irq_rc == IRQ_HANDLED)
9280                         fp_handled = true;
9281         }
9282
9283         return fp_handled ? IRQ_HANDLED : sp_irq_rc;
9284 } /* lpfc_sli4_intr_handler */
9285
9286 /**
9287  * lpfc_sli4_queue_free - free a queue structure and associated memory
9288  * @queue: The queue structure to free.
9289  *
9290  * This function frees a queue structure and the DMAable memory used for
9291  * the host resident queue. This function must be called after destroying the
9292  * queue on the HBA.
9293  **/
9294 void
9295 lpfc_sli4_queue_free(struct lpfc_queue *queue)
9296 {
9297         struct lpfc_dmabuf *dmabuf;
9298
9299         if (!queue)
9300                 return;
9301
9302         while (!list_empty(&queue->page_list)) {
9303                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9304                                  list);
9305                 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
9306                                   dmabuf->virt, dmabuf->phys);
9307                 kfree(dmabuf);
9308         }
9309         kfree(queue);
9311 }
9312
9313 /**
9314  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
9315  * @phba: The HBA that this queue is being created on.
9316  * @entry_size: The size of each queue entry for this queue.
9317  * @entry_count: The number of entries that this queue will handle.
9318  *
9319  * This function allocates a queue structure and the DMAable memory used for
9320  * the host resident queue. This function must be called before creating the
9321  * queue on the HBA.
9322  **/
9323 struct lpfc_queue *
9324 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9325                       uint32_t entry_count)
9326 {
9327         struct lpfc_queue *queue;
9328         struct lpfc_dmabuf *dmabuf;
9329         int x, total_qe_count;
9330         void *dma_pointer;
9331
9333         queue = kzalloc(sizeof(struct lpfc_queue) +
9334                         (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9335         if (!queue)
9336                 return NULL;
9337         queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
9338         INIT_LIST_HEAD(&queue->list);
9339         INIT_LIST_HEAD(&queue->page_list);
9340         INIT_LIST_HEAD(&queue->child_list);
9341         for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
9342                 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9343                 if (!dmabuf)
9344                         goto out_fail;
9345                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9346                                                   PAGE_SIZE, &dmabuf->phys,
9347                                                   GFP_KERNEL);
9348                 if (!dmabuf->virt) {
9349                         kfree(dmabuf);
9350                         goto out_fail;
9351                 }
9352                 memset(dmabuf->virt, 0, PAGE_SIZE);
9353                 dmabuf->buffer_tag = x;
9354                 list_add_tail(&dmabuf->list, &queue->page_list);
9355                 /* initialize queue's entry array */
9356                 dma_pointer = dmabuf->virt;
9357                 for (; total_qe_count < entry_count &&
9358                      dma_pointer < (PAGE_SIZE + dmabuf->virt);
9359                      total_qe_count++, dma_pointer += entry_size) {
9360                         queue->qe[total_qe_count].address = dma_pointer;
9361                 }
9362         }
9363         queue->entry_size = entry_size;
9364         queue->entry_count = entry_count;
9365         queue->phba = phba;
9366
9367         return queue;
9368 out_fail:
9369         lpfc_sli4_queue_free(queue);
9370         return NULL;
9371 }
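
/*
 * Illustrative usage (the 16-byte CQE size is an assumption from the
 * SLI-4 spec, not a constant defined in this file): entry_size is the
 * size of one queue entry in bytes, entry_count the number of entries.
 *
 *	struct lpfc_queue *cq;
 *
 *	cq = lpfc_sli4_queue_alloc(phba, 16, 1024);
 *	if (!cq)
 *		return -ENOMEM;
 */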
9372
9373 /**
9374  * lpfc_eq_create - Create an Event Queue on the HBA
9375  * @phba: HBA structure that indicates port to create a queue on.
9376  * @eq: The queue structure to use to create the event queue.
9377  * @imax: The maximum interrupt per second limit.
9378  *
9379  * This function creates an event queue, as detailed in @eq, on a port,
9380  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
9381  *
9382  * The @phba struct is used to send mailbox command to HBA. The @eq struct
9383  * is used to get the entry count and entry size that are necessary to
9384  * determine the number of pages to allocate and use for this queue. This
9385  * function will send the EQ_CREATE mailbox command to the HBA to setup the
9386  * event queue. This function is asynchronous and will wait for the mailbox
9387  * event queue. This function is synchronous and will wait for the mailbox
9388  * command to finish before continuing.
9389  *
9390  * On success this function will return zero. If unable to allocate enough
9391  * memory this function will return -ENOMEM. If the queue create mailbox
9392  * command fails this function will return -ENXIO.
9393 uint32_t
9394 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9395 {
9396         struct lpfc_mbx_eq_create *eq_create;
9397         LPFC_MBOXQ_t *mbox;
9398         int rc, length, status = 0;
9399         struct lpfc_dmabuf *dmabuf;
9400         uint32_t shdr_status, shdr_add_status;
9401         union lpfc_sli4_cfg_shdr *shdr;
9402         uint16_t dmult;
9403
9404         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9405         if (!mbox)
9406                 return -ENOMEM;
9407         length = (sizeof(struct lpfc_mbx_eq_create) -
9408                   sizeof(struct lpfc_sli4_cfg_mhdr));
9409         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9410                          LPFC_MBOX_OPCODE_EQ_CREATE,
9411                          length, LPFC_SLI4_MBX_EMBED);
9412         eq_create = &mbox->u.mqe.un.eq_create;
9413         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
9414                eq->page_count);
9415         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
9416                LPFC_EQE_SIZE);
9417         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
9418         /* Calculate delay multiplier from maximum interrupts per second */
9419         dmult = LPFC_DMULT_CONST / imax - 1;
9420         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
9421                dmult);
9422         switch (eq->entry_count) {
9423         default:
9424                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9425                                 "0360 Unsupported EQ count. (%d)\n",
9426                                 eq->entry_count);
9427                 if (eq->entry_count < 256) {
9428                         /* Free the mailbox before bailing out */
                        mempool_free(mbox, phba->mbox_mem_pool);
                        return -EINVAL;
                }
9429                 /* otherwise default to smallest count (fall through) */
9430         case 256:
9431                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9432                        LPFC_EQ_CNT_256);
9433                 break;
9434         case 512:
9435                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9436                        LPFC_EQ_CNT_512);
9437                 break;
9438         case 1024:
9439                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9440                        LPFC_EQ_CNT_1024);
9441                 break;
9442         case 2048:
9443                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9444                        LPFC_EQ_CNT_2048);
9445                 break;
9446         case 4096:
9447                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9448                        LPFC_EQ_CNT_4096);
9449                 break;
9450         }
9451         list_for_each_entry(dmabuf, &eq->page_list, list) {
9452                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9453                                         putPaddrLow(dmabuf->phys);
9454                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9455                                         putPaddrHigh(dmabuf->phys);
9456         }
9457         mbox->vport = phba->pport;
9458         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9459         mbox->context1 = NULL;
9460         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9461         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
9462         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9463         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9464         if (shdr_status || shdr_add_status || rc) {
9465                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9466                                 "2500 EQ_CREATE mailbox failed with "
9467                                 "status x%x add_status x%x, mbx status x%x\n",
9468                                 shdr_status, shdr_add_status, rc);
9469                 status = -ENXIO;
9470         }
9471         eq->type = LPFC_EQ;
9472         eq->subtype = LPFC_NONE;
9473         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
9474         if (eq->queue_id == 0xFFFF)
9475                 status = -ENXIO;
9476         eq->host_index = 0;
9477         eq->hba_index = 0;
9478
9479         mempool_free(mbox, phba->mbox_mem_pool);
9480         return status;
9481 }
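
/*
 * A minimal create sequence (sketch; the 4-byte EQE size and the imax
 * value of 1000 interrupts/sec are assumptions, not from this file):
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, 4, 1024);
 *	if (!eq)
 *		return -ENOMEM;
 *	if (lpfc_eq_create(phba, eq, 1000))
 *		lpfc_sli4_queue_free(eq);
 */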
9482
9483 /**
9484  * lpfc_cq_create - Create a Completion Queue on the HBA
9485  * @phba: HBA structure that indicates port to create a queue on.
9486  * @cq: The queue structure to use to create the completion queue.
9487  * @eq: The event queue to bind this completion queue to.
 * @type: The completion queue type (e.g. LPFC_MCQ or LPFC_WCQ).
 * @subtype: The completion queue subtype.
9488  *
9489  * This function creates a completion queue, as detailed in @cq, on a port,
9490  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
9491  *
9492  * The @phba struct is used to send mailbox command to HBA. The @cq struct
9493  * is used to get the entry count and entry size that are necessary to
9494  * determine the number of pages to allocate and use for this queue. The @eq
9495  * is used to indicate which event queue to bind this completion queue to. This
9496  * function will send the CQ_CREATE mailbox command to the HBA to setup the
9497  * completion queue. This function is synchronous and will wait for the
9498  * mailbox command to finish before continuing.
9499  *
9500  * On success this function will return zero. If unable to allocate enough
9501  * memory this function will return -ENOMEM. If the queue create mailbox
9502  * command fails this function will return -ENXIO.
9503  **/
9504 uint32_t
9505 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9506                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
9507 {
9508         struct lpfc_mbx_cq_create *cq_create;
9509         struct lpfc_dmabuf *dmabuf;
9510         LPFC_MBOXQ_t *mbox;
9511         int rc, length, status = 0;
9512         uint32_t shdr_status, shdr_add_status;
9513         union lpfc_sli4_cfg_shdr *shdr;
9514
9515         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9516         if (!mbox)
9517                 return -ENOMEM;
9518         length = (sizeof(struct lpfc_mbx_cq_create) -
9519                   sizeof(struct lpfc_sli4_cfg_mhdr));
9520         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9521                          LPFC_MBOX_OPCODE_CQ_CREATE,
9522                          length, LPFC_SLI4_MBX_EMBED);
9523         cq_create = &mbox->u.mqe.un.cq_create;
9524         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
9525                     cq->page_count);
9526         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
9527         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
9528         bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
9529         switch (cq->entry_count) {
9530         default:
9531                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9532                                 "0361 Unsupported CQ count. (%d)\n",
9533                                 cq->entry_count);
9534                 if (cq->entry_count < 256) {
9535                         /* Free the mailbox before bailing out */
                        mempool_free(mbox, phba->mbox_mem_pool);
                        return -EINVAL;
                }
9536                 /* otherwise default to smallest count (fall through) */
9537         case 256:
9538                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9539                        LPFC_CQ_CNT_256);
9540                 break;
9541         case 512:
9542                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9543                        LPFC_CQ_CNT_512);
9544                 break;
9545         case 1024:
9546                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9547                        LPFC_CQ_CNT_1024);
9548                 break;
9549         }
9550         list_for_each_entry(dmabuf, &cq->page_list, list) {
9551                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9552                                         putPaddrLow(dmabuf->phys);
9553                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9554                                         putPaddrHigh(dmabuf->phys);
9555         }
9556         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9557
9558         /* The IOCTL status is embedded in the mailbox subheader. */
9559         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
9560         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9561         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9562         if (shdr_status || shdr_add_status || rc) {
9563                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9564                                 "2501 CQ_CREATE mailbox failed with "
9565                                 "status x%x add_status x%x, mbx status x%x\n",
9566                                 shdr_status, shdr_add_status, rc);
9567                 status = -ENXIO;
9568                 goto out;
9569         }
9570         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9571         if (cq->queue_id == 0xFFFF) {
9572                 status = -ENXIO;
9573                 goto out;
9574         }
9575         /* link the cq onto the parent eq child list */
9576         list_add_tail(&cq->list, &eq->child_list);
9577         /* Set up completion queue's type and subtype */
9578         cq->type = type;
9579         cq->subtype = subtype;
9581         cq->host_index = 0;
9582         cq->hba_index = 0;
9583
9584 out:
9585         mempool_free(mbox, phba->mbox_mem_pool);
9586         return status;
9587 }
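
/*
 * Sketch of binding a work-type CQ to an existing EQ (the LPFC_FCP
 * subtype is an assumption, not defined in this file):
 *
 *	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
 *	if (rc)
 *		return rc;
 *
 * On success the CQ is linked onto eq->child_list, which is exactly the
 * list the slow-path EQE handler walks to resolve a cqid.
 */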
9588
9589 /**
9590  * lpfc_mq_create - Create a mailbox Queue on the HBA
9591  * @phba: HBA structure that indicates port to create a queue on.
9592  * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to bind this mailbox queue to.
 * @subtype: The queue's subtype.
9593  *
9594  * This function creates a mailbox queue, as detailed in @mq, on a port,
9595  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
9596  *
9597  * The @phba struct is used to send mailbox command to HBA. The @mq struct
9598  * is used to get the entry count and entry size that are necessary to
9599  * determine the number of pages to allocate and use for this queue. This
9600  * function will send the MQ_CREATE mailbox command to the HBA to setup the
9601  * mailbox queue. This function is synchronous and will wait for the mailbox
9602  * command to finish before continuing.
9603  *
9604  * On success this function will return zero. If unable to allocate enough
9605  * memory this function will return -ENOMEM. If the queue create mailbox
9606  * command fails this function will return -ENXIO.
9607  **/
9608 uint32_t
9609 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9610                struct lpfc_queue *cq, uint32_t subtype)
9611 {
9612         struct lpfc_mbx_mq_create *mq_create;
9613         struct lpfc_dmabuf *dmabuf;
9614         LPFC_MBOXQ_t *mbox;
9615         int rc, length, status = 0;
9616         uint32_t shdr_status, shdr_add_status;
9617         union lpfc_sli4_cfg_shdr *shdr;
9618
9619         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9620         if (!mbox)
9621                 return -ENOMEM;
9622         length = (sizeof(struct lpfc_mbx_mq_create) -
9623                   sizeof(struct lpfc_sli4_cfg_mhdr));
9624         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9625                          LPFC_MBOX_OPCODE_MQ_CREATE,
9626                          length, LPFC_SLI4_MBX_EMBED);
9627         mq_create = &mbox->u.mqe.un.mq_create;
9628         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9629                     mq->page_count);
9630         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9631                     cq->queue_id);
9632         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9633         switch (mq->entry_count) {
9634         default:
9635                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9636                                 "0362 Unsupported MQ count. (%d)\n",
9637                                 mq->entry_count);
9638                 if (mq->entry_count < 16) {
9639                         /* Free the mailbox before bailing out */
                        mempool_free(mbox, phba->mbox_mem_pool);
                        return -EINVAL;
                }
9640                 /* otherwise default to smallest count (fall through) */
9641         case 16:
9642                 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9643                        LPFC_MQ_CNT_16);
9644                 break;
9645         case 32:
9646                 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9647                        LPFC_MQ_CNT_32);
9648                 break;
9649         case 64:
9650                 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9651                        LPFC_MQ_CNT_64);
9652                 break;
9653         case 128:
9654                 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9655                        LPFC_MQ_CNT_128);
9656                 break;
9657         }
9658         list_for_each_entry(dmabuf, &mq->page_list, list) {
9659                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9660                                         putPaddrLow(dmabuf->phys);
9661                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9662                                         putPaddrHigh(dmabuf->phys);
9663         }
9664         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9665         /* The IOCTL status is embedded in the mailbox subheader. */
9666         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9667         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9668         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9669         if (shdr_status || shdr_add_status || rc) {
9670                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9671                                 "2502 MQ_CREATE mailbox failed with "
9672                                 "status x%x add_status x%x, mbx status x%x\n",
9673                                 shdr_status, shdr_add_status, rc);
9674                 status = -ENXIO;
9675                 goto out;
9676         }
9677         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9678         if (mq->queue_id == 0xFFFF) {
9679                 status = -ENXIO;
9680                 goto out;
9681         }
9682         mq->type = LPFC_MQ;
9683         mq->subtype = subtype;
9684         mq->host_index = 0;
9685         mq->hba_index = 0;
9686
9687         /* link the mq onto the parent cq child list */
9688         list_add_tail(&mq->list, &cq->child_list);
9689 out:
9690         mempool_free(mbox, phba->mbox_mem_pool);
9691         return status;
9692 }
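
/*
 * Illustrative only: the MQ is a child of a CQ, so mailbox completions
 * surface as MCQEs on that CQ and reach lpfc_sli4_sp_handle_mcqe():
 *
 *	rc = lpfc_mq_create(phba, mq, cq, LPFC_MBOX);
 *
 * (The LPFC_MBOX subtype is an assumption, not defined in this file.)
 */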
9693
9694 /**
9695  * lpfc_wq_create - Create a Work Queue on the HBA
9696  * @phba: HBA structure that indicates port to create a queue on.
9697  * @wq: The queue structure to use to create the work queue.
9698  * @cq: The completion queue to bind this work queue to.
9699  * @subtype: The subtype of the work queue indicating its functionality.
9700  *
9701  * This function creates a work queue, as detailed in @wq, on a port, described
9702  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
9703  *
9704  * The @phba struct is used to send mailbox command to HBA. The @wq struct
9705  * is used to get the entry count and entry size that are necessary to
9706  * determine the number of pages to allocate and use for this queue. The @cq
9707  * is used to indicate which completion queue to bind this work queue to. This
9708  * function will send the WQ_CREATE mailbox command to the HBA to setup the
9709  * work queue. This function is asynchronous and will wait for the mailbox
9710  * work queue. This function is synchronous and will wait for the mailbox
9711  * command to finish before continuing.
9712  *
9713  * On success this function will return zero. If unable to allocate enough
9714  * memory this function will return -ENOMEM. If the queue create mailbox
9715  * command fails this function will return -ENXIO.
9716 uint32_t
9717 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9718                struct lpfc_queue *cq, uint32_t subtype)
9719 {
9720         struct lpfc_mbx_wq_create *wq_create;
9721         struct lpfc_dmabuf *dmabuf;
9722         LPFC_MBOXQ_t *mbox;
9723         int rc, length, status = 0;
9724         uint32_t shdr_status, shdr_add_status;
9725         union lpfc_sli4_cfg_shdr *shdr;
9726
9727         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9728         if (!mbox)
9729                 return -ENOMEM;
9730         length = (sizeof(struct lpfc_mbx_wq_create) -
9731                   sizeof(struct lpfc_sli4_cfg_mhdr));
9732         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9733                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
9734                          length, LPFC_SLI4_MBX_EMBED);
9735         wq_create = &mbox->u.mqe.un.wq_create;
9736         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
9737                     wq->page_count);
9738         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
9739                     cq->queue_id);
9740         list_for_each_entry(dmabuf, &wq->page_list, list) {
9741                 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9742                                         putPaddrLow(dmabuf->phys);
9743                 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9744                                         putPaddrHigh(dmabuf->phys);
9745         }
9746         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9747         /* The IOCTL status is embedded in the mailbox subheader. */
9748         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
9749         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9750         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9751         if (shdr_status || shdr_add_status || rc) {
9752                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9753                                 "2503 WQ_CREATE mailbox failed with "
9754                                 "status x%x add_status x%x, mbx status x%x\n",
9755                                 shdr_status, shdr_add_status, rc);
9756                 status = -ENXIO;
9757                 goto out;
9758         }
9759         wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
9760         if (wq->queue_id == 0xFFFF) {
9761                 status = -ENXIO;
9762                 goto out;
9763         }
9764         wq->type = LPFC_WQ;
9765         wq->subtype = subtype;
9766         wq->host_index = 0;
9767         wq->hba_index = 0;
9768
9769         /* link the wq onto the parent cq child list */
9770         list_add_tail(&wq->list, &cq->child_list);
9771 out:
9772         mempool_free(mbox, phba->mbox_mem_pool);
9773         return status;
9774 }
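
/*
 * Sketch (the LPFC_FCP subtype is an assumption): a fast-path FCP WQ is
 * created against its CQ and, on success, linked onto cq->child_list,
 * the list that lpfc_sli4_fp_handle_rel_wcqe() searches by queue_id:
 *
 *	rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
 */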
9775
9776 /**
9777  * lpfc_rq_create - Create a Receive Queue on the HBA
9778  * @phba: HBA structure that indicates port to create a queue on.
9779  * @hrq: The queue structure to use to create the header receive queue.
9780  * @drq: The queue structure to use to create the data receive queue.
9781  * @cq: The completion queue to bind these receive queues to.
 * @subtype: The subtype of the receive queues.
9782  *
9783  * This function creates a receive buffer queue pair, as detailed in @hrq and
9784  * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
9785  * to the HBA.
9786  *
9787  * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
9788  * structs are used to get the entry count that is necessary to determine the
9789  * number of pages to use for this queue. The @cq is used to indicate which
9790  * completion queue to bind received buffers that are posted to these queues to.
9791  * This function will send the RQ_CREATE mailbox command to the HBA to setup the
9792  * receive queue pair. This function is synchronous and will wait for the
9793  * mailbox command to finish before continuing.
9794  *
9795  * On success this function will return zero. If unable to allocate enough
9796  * memory this function will return -ENOMEM. If the queue create mailbox
9797  * command fails this function will return -ENXIO.
9798  **/
9799 uint32_t
9800 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9801                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
9802 {
9803         struct lpfc_mbx_rq_create *rq_create;
9804         struct lpfc_dmabuf *dmabuf;
9805         LPFC_MBOXQ_t *mbox;
9806         int rc, length, status = 0;
9807         uint32_t shdr_status, shdr_add_status;
9808         union lpfc_sli4_cfg_shdr *shdr;
9809
9810         if (hrq->entry_count != drq->entry_count)
9811                 return -EINVAL;
9812         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9813         if (!mbox)
9814                 return -ENOMEM;
9815         length = (sizeof(struct lpfc_mbx_rq_create) -
9816                   sizeof(struct lpfc_sli4_cfg_mhdr));
9817         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9818                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9819                          length, LPFC_SLI4_MBX_EMBED);
9820         rq_create = &mbox->u.mqe.un.rq_create;
9821         switch (hrq->entry_count) {
9822         default:
9823                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9824                                 "2535 Unsupported RQ count. (%d)\n",
9825                                 hrq->entry_count);
9826                 if (hrq->entry_count < 512) {
9827                         /* Free the mailbox before bailing out */
                        mempool_free(mbox, phba->mbox_mem_pool);
                        return -EINVAL;
                }
9828                 /* otherwise default to smallest count (fall through) */
9829         case 512:
9830                 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9831                        LPFC_RQ_RING_SIZE_512);
9832                 break;
9833         case 1024:
9834                 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9835                        LPFC_RQ_RING_SIZE_1024);
9836                 break;
9837         case 2048:
9838                 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9839                        LPFC_RQ_RING_SIZE_2048);
9840                 break;
9841         case 4096:
9842                 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9843                        LPFC_RQ_RING_SIZE_4096);
9844                 break;
9845         }
9846         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9847                cq->queue_id);
9848         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9849                hrq->page_count);
9850         bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9851                LPFC_HDR_BUF_SIZE);
9852         list_for_each_entry(dmabuf, &hrq->page_list, list) {
9853                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9854                                         putPaddrLow(dmabuf->phys);
9855                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9856                                         putPaddrHigh(dmabuf->phys);
9857         }
9858         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9859         /* The IOCTL status is embedded in the mailbox subheader. */
9860         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9861         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9862         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9863         if (shdr_status || shdr_add_status || rc) {
9864                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9865                                 "2504 RQ_CREATE mailbox failed with "
9866                                 "status x%x add_status x%x, mbx status x%x\n",
9867                                 shdr_status, shdr_add_status, rc);
9868                 status = -ENXIO;
9869                 goto out;
9870         }
9871         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9872         if (hrq->queue_id == 0xFFFF) {
9873                 status = -ENXIO;
9874                 goto out;
9875         }
9876         hrq->type = LPFC_HRQ;
9877         hrq->subtype = subtype;
9878         hrq->host_index = 0;
9879         hrq->hba_index = 0;
9880
9881         /* now create the data queue */
9882         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9883                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9884                          length, LPFC_SLI4_MBX_EMBED);
9885         switch (drq->entry_count) {
9886         default:
9887                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9888                                 "2536 Unsupported RQ count. (%d)\n",
9889                                 drq->entry_count);
9890                 if (drq->entry_count < 512) {
9891                         /* Free the mailbox before bailing out */
                        mempool_free(mbox, phba->mbox_mem_pool);
                        return -EINVAL;
                }
9892                 /* otherwise default to smallest count (fall through) */
9893         case 512:
9894                 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9895                        LPFC_RQ_RING_SIZE_512);
9896                 break;
9897         case 1024:
9898                 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9899                        LPFC_RQ_RING_SIZE_1024);
9900                 break;
9901         case 2048:
9902                 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9903                        LPFC_RQ_RING_SIZE_2048);
9904                 break;
9905         case 4096:
9906                 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9907                        LPFC_RQ_RING_SIZE_4096);
9908                 break;
9909         }
9910         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9911                cq->queue_id);
9912         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9913                drq->page_count);
9914         bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9915                LPFC_DATA_BUF_SIZE);
9916         list_for_each_entry(dmabuf, &drq->page_list, list) {
9917                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9918                                         putPaddrLow(dmabuf->phys);
9919                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9920                                         putPaddrHigh(dmabuf->phys);
9921         }
9922         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9923         /* The IOCTL status is embedded in the mailbox subheader. */
9924         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9925         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9926         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9927         if (shdr_status || shdr_add_status || rc) {
9928                 status = -ENXIO;
9929                 goto out;
9930         }
9931         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9932         if (drq->queue_id == 0xFFFF) {
9933                 status = -ENXIO;
9934                 goto out;
9935         }
9936         drq->type = LPFC_DRQ;
9937         drq->subtype = subtype;
9938         drq->host_index = 0;
9939         drq->hba_index = 0;
9940
9941         /* link the header and data RQs onto the parent cq child list */
9942         list_add_tail(&hrq->list, &cq->child_list);
9943         list_add_tail(&drq->list, &cq->child_list);
9944
9945 out:
9946         mempool_free(mbox, phba->mbox_mem_pool);
9947         return status;
9948 }
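
/*
 * Illustrative pairing (the LPFC_USOL subtype is an assumption): the
 * header and data RQs must be sized identically, which the routine
 * enforces up front:
 *
 *	if (hrq->entry_count != drq->entry_count)
 *		return -EINVAL;
 *	rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
 *
 * Each received frame then consumes one LPFC_HDR_BUF_SIZE header buffer
 * from hrq and one LPFC_DATA_BUF_SIZE data buffer from drq.
 */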
9949
9950 /**
9951  * lpfc_eq_destroy - Destroy an Event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
9952  * @eq: The queue structure associated with the queue to destroy.
9953  *
9954  * This function destroys a queue, as detailed in @eq, by sending a mailbox
9955  * command, specific to the type of queue, to the HBA.
9956  *
9957  * The @eq struct is used to get the queue ID of the queue to destroy.
9958  *
9959  * On success this function will return zero. If the queue destroy mailbox
9960  * command fails this function will return -ENXIO.
9961  **/
9962 uint32_t
9963 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
9964 {
9965         LPFC_MBOXQ_t *mbox;
9966         int rc, length, status = 0;
9967         uint32_t shdr_status, shdr_add_status;
9968         union lpfc_sli4_cfg_shdr *shdr;
9969
9970         if (!eq)
9971                 return -ENODEV;
9972         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
9973         if (!mbox)
9974                 return -ENOMEM;
9975         length = (sizeof(struct lpfc_mbx_eq_destroy) -
9976                   sizeof(struct lpfc_sli4_cfg_mhdr));
9977         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9978                          LPFC_MBOX_OPCODE_EQ_DESTROY,
9979                          length, LPFC_SLI4_MBX_EMBED);
9980         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
9981                eq->queue_id);
9982         mbox->vport = eq->phba->pport;
9983         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9984
9985         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
9986         /* The IOCTL status is embedded in the mailbox subheader. */
9987         shdr = (union lpfc_sli4_cfg_shdr *)
9988                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
9989         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9990         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9991         if (shdr_status || shdr_add_status || rc) {
9992                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9993                                 "2505 EQ_DESTROY mailbox failed with "
9994                                 "status x%x add_status x%x, mbx status x%x\n",
9995                                 shdr_status, shdr_add_status, rc);
9996                 status = -ENXIO;
9997         }
9998
9999         /* Remove eq from any list */
10000         list_del_init(&eq->list);
10001         mempool_free(mbox, eq->phba->mbox_mem_pool);
10002         return status;
10003 }
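
/*
 * Teardown order matters (illustrative): children are destroyed before
 * their parent, mirroring the child_list links built at create time,
 * e.g. for one fast-path set:
 *
 *	lpfc_wq_destroy(phba, wq);
 *	lpfc_cq_destroy(phba, cq);
 *	lpfc_eq_destroy(phba, eq);
 *
 * followed by lpfc_sli4_queue_free() on each structure once the HBA no
 * longer references the DMA pages.
 */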
10004
10005 /**
10006  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
10007  * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
10008  *
10009  * This function destroys a queue, as detailed in @cq, by sending a mailbox
10010  * command, specific to the type of queue, to the HBA.
10011  *
10012  * The @cq struct is used to get the queue ID of the queue to destroy.
10013  *
10014  * On success this function will return zero. If the queue destroy mailbox
10015  * command fails this function will return -ENXIO.
10016  **/
10017 uint32_t
10018 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
10019 {
10020         LPFC_MBOXQ_t *mbox;
10021         int rc, length, status = 0;
10022         uint32_t shdr_status, shdr_add_status;
10023         union lpfc_sli4_cfg_shdr *shdr;
10024
10025         if (!cq)
10026                 return -ENODEV;
10027         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
10028         if (!mbox)
10029                 return -ENOMEM;
10030         length = (sizeof(struct lpfc_mbx_cq_destroy) -
10031                   sizeof(struct lpfc_sli4_cfg_mhdr));
10032         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10033                          LPFC_MBOX_OPCODE_CQ_DESTROY,
10034                          length, LPFC_SLI4_MBX_EMBED);
10035         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
10036                cq->queue_id);
10037         mbox->vport = cq->phba->pport;
10038         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10039         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
10040         /* The IOCTL status is embedded in the mailbox subheader. */
10041         shdr = (union lpfc_sli4_cfg_shdr *)
10042                 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
10043         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10044         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10045         if (shdr_status || shdr_add_status || rc) {
10046                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10047                                 "2506 CQ_DESTROY mailbox failed with "
10048                                 "status x%x add_status x%x, mbx status x%x\n",
10049                                 shdr_status, shdr_add_status, rc);
10050                 status = -ENXIO;
10051         }
10052         /* Remove cq from any list */
10053         list_del_init(&cq->list);
10054         mempool_free(mbox, cq->phba->mbox_mem_pool);
10055         return status;
10056 }
10057
10058 /**
10059  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
10060  * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
10061  *
10062  * This function destroys a queue, as detailed in @mq, by sending a mailbox
10063  * command, specific to the type of queue, to the HBA.
10064  *
10065  * The @mq struct is used to get the queue ID of the queue to destroy.
10066  *
10067  * On success this function will return zero. If the queue destroy mailbox
10068  * command fails this function will return -ENXIO.
10069  **/
10070 uint32_t
10071 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
10072 {
10073         LPFC_MBOXQ_t *mbox;
10074         int rc, length, status = 0;
10075         uint32_t shdr_status, shdr_add_status;
10076         union lpfc_sli4_cfg_shdr *shdr;
10077
10078         if (!mq)
10079                 return -ENODEV;
10080         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
10081         if (!mbox)
10082                 return -ENOMEM;
10083         length = (sizeof(struct lpfc_mbx_mq_destroy) -
10084                   sizeof(struct lpfc_sli4_cfg_mhdr));
10085         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10086                          LPFC_MBOX_OPCODE_MQ_DESTROY,
10087                          length, LPFC_SLI4_MBX_EMBED);
10088         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
10089                mq->queue_id);
10090         mbox->vport = mq->phba->pport;
10091         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10092         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
10093         /* The IOCTL status is embedded in the mailbox subheader. */
10094         shdr = (union lpfc_sli4_cfg_shdr *)
10095                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
10096         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10097         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10098         if (shdr_status || shdr_add_status || rc) {
10099                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10100                                 "2507 MQ_DESTROY mailbox failed with "
10101                                 "status x%x add_status x%x, mbx status x%x\n",
10102                                 shdr_status, shdr_add_status, rc);
10103                 status = -ENXIO;
10104         }
10105         /* Remove mq from any list */
10106         list_del_init(&mq->list);
10107         mempool_free(mbox, mq->phba->mbox_mem_pool);
10108         return status;
10109 }
10110
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: Pointer to HBA context object.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
10123 uint32_t
10124 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10125 {
10126         LPFC_MBOXQ_t *mbox;
10127         int rc, length, status = 0;
10128         uint32_t shdr_status, shdr_add_status;
10129         union lpfc_sli4_cfg_shdr *shdr;
10130
10131         if (!wq)
10132                 return -ENODEV;
10133         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
10134         if (!mbox)
10135                 return -ENOMEM;
10136         length = (sizeof(struct lpfc_mbx_wq_destroy) -
10137                   sizeof(struct lpfc_sli4_cfg_mhdr));
10138         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10139                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
10140                          length, LPFC_SLI4_MBX_EMBED);
10141         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
10142                wq->queue_id);
10143         mbox->vport = wq->phba->pport;
10144         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10145         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
10146         shdr = (union lpfc_sli4_cfg_shdr *)
10147                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
10148         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10149         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10150         if (shdr_status || shdr_add_status || rc) {
10151                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10152                                 "2508 WQ_DESTROY mailbox failed with "
10153                                 "status x%x add_status x%x, mbx status x%x\n",
10154                                 shdr_status, shdr_add_status, rc);
10155                 status = -ENXIO;
10156         }
10157         /* Remove wq from any list */
10158         list_del_init(&wq->list);
10159         mempool_free(mbox, wq->phba->mbox_mem_pool);
10160         return status;
10161 }
10162
/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: Pointer to HBA context object.
 * @hrq: The queue structure associated with the receive header queue.
 * @drq: The queue structure associated with the receive data queue.
 *
 * This function destroys the receive queue pair, as detailed in @hrq and
 * @drq, by sending a mailbox command, specific to the type of queue, to
 * the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues
 * to destroy.
 *
 * On success this function will return zero. If a queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
10175 uint32_t
10176 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10177                 struct lpfc_queue *drq)
10178 {
10179         LPFC_MBOXQ_t *mbox;
10180         int rc, length, status = 0;
10181         uint32_t shdr_status, shdr_add_status;
10182         union lpfc_sli4_cfg_shdr *shdr;
10183
10184         if (!hrq || !drq)
10185                 return -ENODEV;
10186         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
10187         if (!mbox)
10188                 return -ENOMEM;
10189         length = (sizeof(struct lpfc_mbx_rq_destroy) -
10190                   sizeof(struct mbox_header));
10191         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10192                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
10193                          length, LPFC_SLI4_MBX_EMBED);
10194         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10195                hrq->queue_id);
10196         mbox->vport = hrq->phba->pport;
10197         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10198         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
10199         /* The IOCTL status is embedded in the mailbox subheader. */
10200         shdr = (union lpfc_sli4_cfg_shdr *)
10201                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10202         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10203         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10204         if (shdr_status || shdr_add_status || rc) {
10205                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10206                                 "2509 RQ_DESTROY mailbox failed with "
10207                                 "status x%x add_status x%x, mbx status x%x\n",
10208                                 shdr_status, shdr_add_status, rc);
10209                 if (rc != MBX_TIMEOUT)
10210                         mempool_free(mbox, hrq->phba->mbox_mem_pool);
10211                 return -ENXIO;
10212         }
10213         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10214                drq->queue_id);
10215         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
10216         shdr = (union lpfc_sli4_cfg_shdr *)
10217                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10218         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10219         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10220         if (shdr_status || shdr_add_status || rc) {
10221                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10222                                 "2510 RQ_DESTROY mailbox failed with "
10223                                 "status x%x add_status x%x, mbx status x%x\n",
10224                                 shdr_status, shdr_add_status, rc);
10225                 status = -ENXIO;
10226         }
        list_del_init(&hrq->list);
        list_del_init(&drq->list);
        if (rc != MBX_TIMEOUT)
                mempool_free(mbox, hrq->phba->mbox_mem_pool);
10230         return status;
10231 }
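
/*
 * Usage sketch (illustrative only, not taken from this driver): callers
 * typically unwind the SLI4 queues in the reverse order of creation --
 * receive/work queues first, then their completion queues, then the event
 * queues. The queue pointer names below are assumptions for illustration;
 * the real queues are tracked under phba->sli4_hba.
 *
 *	lpfc_rq_destroy(phba, hdr_rq, dat_rq);
 *	lpfc_wq_destroy(phba, els_wq);
 *	lpfc_mq_destroy(phba, mbx_mq);
 *	lpfc_cq_destroy(phba, els_cq);
 *	lpfc_eq_destroy(phba, sp_eq);
 */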
10232
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: Pointer to HBA context object.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * @pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * @pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64-byte aligned.
 * If two SGL pages are mapped, the first must have 256 entries; the
 * second can have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-EINVAL, -ENXIO, -ENOMEM - Failure
 **/
10255 int
10256 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10257                 dma_addr_t pdma_phys_addr0,
10258                 dma_addr_t pdma_phys_addr1,
10259                 uint16_t xritag)
10260 {
10261         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10262         LPFC_MBOXQ_t *mbox;
10263         int rc;
10264         uint32_t shdr_status, shdr_add_status;
10265         union lpfc_sli4_cfg_shdr *shdr;
10266
10267         if (xritag == NO_XRI) {
10268                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0364 Invalid param: xritag is NO_XRI\n");
10270                 return -EINVAL;
10271         }
10272
10273         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10274         if (!mbox)
10275                 return -ENOMEM;
10276
10277         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10278                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10279                         sizeof(struct lpfc_mbx_post_sgl_pages) -
10280                         sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10281
10282         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10283                                 &mbox->u.mqe.un.post_sgl_pages;
10284         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10285         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10286
10287         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10288                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10289         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10290                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10291
10292         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10293                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10294         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10295                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10296         if (!phba->sli4_hba.intr_enable)
10297                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10298         else
10299                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10300         /* The IOCTL status is embedded in the mailbox subheader. */
10301         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10302         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10303         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10304         if (rc != MBX_TIMEOUT)
10305                 mempool_free(mbox, phba->mbox_mem_pool);
10306         if (shdr_status || shdr_add_status || rc) {
10307                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10308                                 "2511 POST_SGL mailbox failed with "
10309                                 "status x%x add_status x%x, mbx status x%x\n",
10310                                 shdr_status, shdr_add_status, rc);
10311                 rc = -ENXIO;
10312         }
        return rc;
10314 }
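
/*
 * Usage sketch (illustrative only): posting the sgl page(s) for one XRI
 * when everything fits in a single 256-entry page, so the second page is
 * unused. The variable names here are assumptions:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
 *	if (rc)
 *		return rc;	(fail the setup path)
 */
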
/**
 * lpfc_sli4_remove_all_sgl_pages - Remove all sgl pages registered with HBA
 * @phba: Pointer to HBA context object.
 *
 * This routine will remove all of the sgl pages registered with the hba.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
10325 int
10326 lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10327 {
10328         LPFC_MBOXQ_t *mbox;
10329         int rc;
10330         uint32_t shdr_status, shdr_add_status;
10331         union lpfc_sli4_cfg_shdr *shdr;
10332
10333         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10334         if (!mbox)
10335                 return -ENOMEM;
10336
10337         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10338                         LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10339                         LPFC_SLI4_MBX_EMBED);
10340         if (!phba->sli4_hba.intr_enable)
10341                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10342         else
10343                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10344         /* The IOCTL status is embedded in the mailbox subheader. */
10345         shdr = (union lpfc_sli4_cfg_shdr *)
10346                 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10347         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10348         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10349         if (rc != MBX_TIMEOUT)
10350                 mempool_free(mbox, phba->mbox_mem_pool);
10351         if (shdr_status || shdr_add_status || rc) {
10352                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10353                                 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10354                                 "status x%x add_status x%x, mbx status x%x\n",
10355                                 shdr_status, shdr_add_status, rc);
10356                 rc = -ENXIO;
10357         }
10358         return rc;
10359 }
10360
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function allocates an xritag for the iocb. If no unused xritag is
 * available, the function returns NO_XRI (0xffff), which is not a valid
 * xritag; otherwise it returns the newly allocated xritag.
 * The caller is not required to hold any lock.
 **/
10371 uint16_t
10372 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10373 {
10374         uint16_t xritag;
10375
10376         spin_lock_irq(&phba->hbalock);
10377         xritag = phba->sli4_hba.next_xri;
        if ((xritag != NO_XRI) && xritag <
10379                 (phba->sli4_hba.max_cfg_param.max_xri
10380                         + phba->sli4_hba.max_cfg_param.xri_base)) {
10381                 phba->sli4_hba.next_xri++;
10382                 phba->sli4_hba.max_cfg_param.xri_used++;
10383                 spin_unlock_irq(&phba->hbalock);
10384                 return xritag;
10385         }
10386         spin_unlock_irq(&phba->hbalock);
        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                        "2004 Failed to allocate XRI. Last XRITAG is %d"
                        " Max XRI is %d, Used XRI is %d\n",
                        phba->sli4_hba.next_xri,
                        phba->sli4_hba.max_cfg_param.max_xri,
                        phba->sli4_hba.max_cfg_param.xri_used);
        return NO_XRI;
10394 }
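
/*
 * Usage sketch (illustrative only): callers should test for NO_XRI before
 * using the returned tag. The variable names are assumptions:
 *
 *	xritag = lpfc_sli4_next_xritag(phba);
 *	if (xritag == NO_XRI)
 *		return IOCB_ERROR;	(no exchange resources available)
 *	iocbq->sli4_xritag = xritag;
 */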
10395
/**
 * lpfc_sli4_post_sgl_list - Post a block of ELS sgl pages to the firmware
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post a block of the driver's ELS sgl pages to
 * the HBA using a non-embedded mailbox command. No Lock is held. This
 * routine is only called when the driver is loading and after all IO has
 * been stopped.
 *
 * Return codes:
 *	0 - Success
 *	-ENOMEM, -ENXIO - Failure
 **/
10405 int
10406 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10407 {
10408         struct lpfc_sglq *sglq_entry;
10409         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10410         struct sgl_page_pairs *sgl_pg_pairs;
10411         void *viraddr;
10412         LPFC_MBOXQ_t *mbox;
10413         uint32_t reqlen, alloclen, pg_pairs;
10414         uint32_t mbox_tmo;
10415         uint16_t xritag_start = 0;
10416         int els_xri_cnt, rc = 0;
10417         uint32_t shdr_status, shdr_add_status;
10418         union lpfc_sli4_cfg_shdr *shdr;
10419
10420         /* The number of sgls to be posted */
10421         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
10422
10423         reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
10424                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10425         if (reqlen > PAGE_SIZE) {
10426                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "2559 Block sgl registration required DMA "
                                "size (%d) greater than a page\n", reqlen);
10429                 return -ENOMEM;
10430         }
10431         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10432         if (!mbox) {
10433                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10434                                 "2560 Failed to allocate mbox cmd memory\n");
10435                 return -ENOMEM;
10436         }
10437
10438         /* Allocate DMA memory and set up the non-embedded mailbox command */
10439         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10440                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10441                          LPFC_SLI4_MBX_NEMBED);
10442
10443         if (alloclen < reqlen) {
10444                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10445                                 "0285 Allocated DMA memory size (%d) is "
10446                                 "less than the requested DMA memory "
10447                                 "size (%d)\n", alloclen, reqlen);
10448                 lpfc_sli4_mbox_cmd_free(phba, mbox);
10449                 return -ENOMEM;
10450         }
10451         /* Get the first SGE entry from the non-embedded DMA memory */
10452         viraddr = mbox->sge_array->addr[0];
10453
10454         /* Set up the SGL pages in the non-embedded DMA pages */
10455         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10456         sgl_pg_pairs = &sgl->sgl_pg_pairs;
10457
10458         for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
10459                 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
10460                 /* Set up the sge entry */
10461                 sgl_pg_pairs->sgl_pg0_addr_lo =
10462                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
10463                 sgl_pg_pairs->sgl_pg0_addr_hi =
10464                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
10465                 sgl_pg_pairs->sgl_pg1_addr_lo =
10466                                 cpu_to_le32(putPaddrLow(0));
10467                 sgl_pg_pairs->sgl_pg1_addr_hi =
10468                                 cpu_to_le32(putPaddrHigh(0));
10469                 /* Keep the first xritag on the list */
10470                 if (pg_pairs == 0)
10471                         xritag_start = sglq_entry->sli4_xritag;
10472                 sgl_pg_pairs++;
10473         }
10474         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10475         bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
10476         /* Perform endian conversion if necessary */
10477         sgl->word0 = cpu_to_le32(sgl->word0);
10478
10479         if (!phba->sli4_hba.intr_enable)
10480                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10481         else {
10482                 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10483                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10484         }
10485         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10486         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10487         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10488         if (rc != MBX_TIMEOUT)
10489                 lpfc_sli4_mbox_cmd_free(phba, mbox);
10490         if (shdr_status || shdr_add_status || rc) {
10491                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10492                                 "2513 POST_SGL_BLOCK mailbox command failed "
10493                                 "status x%x add_status x%x mbx status x%x\n",
10494                                 shdr_status, shdr_add_status, rc);
10495                 rc = -ENXIO;
10496         }
10497         return rc;
10498 }
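
/*
 * Usage sketch (illustrative only): this is invoked once from the SLI4
 * bring-up path after the ELS sgl array has been populated. The unwind
 * label is hypothetical:
 *
 *	rc = lpfc_sli4_post_sgl_list(phba);
 *	if (rc)
 *		goto out_free_sgl_list;
 */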
10499
/**
 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @sblist: pointer to scsi buffer list.
 * @cnt: number of scsi buffers on the list.
 *
 * This routine is invoked to post a block of @cnt scsi sgl pages from a
 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
 * No Lock is held.
 *
 * Return codes:
 *	0 - Success
 *	-ENOMEM, -ENXIO - Failure
 **/
10511 int
10512 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10513                               int cnt)
10514 {
10515         struct lpfc_scsi_buf *psb;
10516         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10517         struct sgl_page_pairs *sgl_pg_pairs;
10518         void *viraddr;
10519         LPFC_MBOXQ_t *mbox;
10520         uint32_t reqlen, alloclen, pg_pairs;
10521         uint32_t mbox_tmo;
10522         uint16_t xritag_start = 0;
10523         int rc = 0;
10524         uint32_t shdr_status, shdr_add_status;
10525         dma_addr_t pdma_phys_bpl1;
10526         union lpfc_sli4_cfg_shdr *shdr;
10527
10528         /* Calculate the requested length of the dma memory */
10529         reqlen = cnt * sizeof(struct sgl_page_pairs) +
10530                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10531         if (reqlen > PAGE_SIZE) {
10532                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "0217 Block sgl registration required DMA "
                                "size (%d) greater than a page\n", reqlen);
10535                 return -ENOMEM;
10536         }
10537         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10538         if (!mbox) {
10539                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10540                                 "0283 Failed to allocate mbox cmd memory\n");
10541                 return -ENOMEM;
10542         }
10543
10544         /* Allocate DMA memory and set up the non-embedded mailbox command */
10545         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10546                                 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10547                                 LPFC_SLI4_MBX_NEMBED);
10548
10549         if (alloclen < reqlen) {
10550                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10551                                 "2561 Allocated DMA memory size (%d) is "
10552                                 "less than the requested DMA memory "
10553                                 "size (%d)\n", alloclen, reqlen);
10554                 lpfc_sli4_mbox_cmd_free(phba, mbox);
10555                 return -ENOMEM;
10556         }
10557         /* Get the first SGE entry from the non-embedded DMA memory */
10558         viraddr = mbox->sge_array->addr[0];
10559
10560         /* Set up the SGL pages in the non-embedded DMA pages */
10561         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10562         sgl_pg_pairs = &sgl->sgl_pg_pairs;
10563
10564         pg_pairs = 0;
10565         list_for_each_entry(psb, sblist, list) {
10566                 /* Set up the sge entry */
10567                 sgl_pg_pairs->sgl_pg0_addr_lo =
10568                         cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
10569                 sgl_pg_pairs->sgl_pg0_addr_hi =
10570                         cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
10571                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
10572                         pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
10573                 else
10574                         pdma_phys_bpl1 = 0;
10575                 sgl_pg_pairs->sgl_pg1_addr_lo =
10576                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
10577                 sgl_pg_pairs->sgl_pg1_addr_hi =
10578                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
10579                 /* Keep the first xritag on the list */
10580                 if (pg_pairs == 0)
10581                         xritag_start = psb->cur_iocbq.sli4_xritag;
10582                 sgl_pg_pairs++;
10583                 pg_pairs++;
10584         }
10585         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10586         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10587         /* Perform endian conversion if necessary */
10588         sgl->word0 = cpu_to_le32(sgl->word0);
10589
10590         if (!phba->sli4_hba.intr_enable)
10591                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10592         else {
10593                 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10594                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10595         }
10596         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10597         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10598         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10599         if (rc != MBX_TIMEOUT)
10600                 lpfc_sli4_mbox_cmd_free(phba, mbox);
10601         if (shdr_status || shdr_add_status || rc) {
10602                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10603                                 "2564 POST_SGL_BLOCK mailbox command failed "
10604                                 "status x%x add_status x%x mbx status x%x\n",
10605                                 shdr_status, shdr_add_status, rc);
10606                 rc = -ENXIO;
10607         }
10608         return rc;
10609 }
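
/*
 * Usage sketch (illustrative only): posting the sgls for a batch of newly
 * allocated scsi buffers gathered on a local list. The list and counter
 * names are assumptions:
 *
 *	LIST_HEAD(post_sblist);
 *	...collect each lpfc_scsi_buf on post_sblist, counting num_posted...
 *	rc = lpfc_sli4_post_scsi_sgl_block(phba, &post_sblist, num_posted);
 */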
10610
10611 /**
10612  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
10613  * @phba: pointer to lpfc_hba struct that the frame was received on
10614  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10615  *
10616  * This function checks the fields in the @fc_hdr to see if the FC frame is a
10617  * valid type of frame that the LPFC driver will handle. This function will
10618  * return a zero if the frame is a valid frame or a non zero value when the
 * return zero if the frame is a valid frame or a non-zero value when the
10620  **/
10621 static int
10622 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
10623 {
10624         char *rctl_names[] = FC_RCTL_NAMES_INIT;
10625         char *type_names[] = FC_TYPE_NAMES_INIT;
10626         struct fc_vft_header *fc_vft_hdr;
10627
10628         switch (fc_hdr->fh_r_ctl) {
10629         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
10630         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
10631         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
10632         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
10633         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
10634         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
10635         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
10636         case FC_RCTL_DD_CMD_STATUS:     /* command status */
10637         case FC_RCTL_ELS_REQ:   /* extended link services request */
10638         case FC_RCTL_ELS_REP:   /* extended link services reply */
10639         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
10640         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
10641         case FC_RCTL_BA_NOP:    /* basic link service NOP */
10642         case FC_RCTL_BA_ABTS:   /* basic link service abort */
10643         case FC_RCTL_BA_RMC:    /* remove connection */
10644         case FC_RCTL_BA_ACC:    /* basic accept */
10645         case FC_RCTL_BA_RJT:    /* basic reject */
10646         case FC_RCTL_BA_PRMT:
10647         case FC_RCTL_ACK_1:     /* acknowledge_1 */
10648         case FC_RCTL_ACK_0:     /* acknowledge_0 */
10649         case FC_RCTL_P_RJT:     /* port reject */
10650         case FC_RCTL_F_RJT:     /* fabric reject */
10651         case FC_RCTL_P_BSY:     /* port busy */
10652         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
10653         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
10654         case FC_RCTL_LCR:       /* link credit reset */
10655         case FC_RCTL_END:       /* end */
10656                 break;
10657         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
10658                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10659                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
10660                 return lpfc_fc_frame_check(phba, fc_hdr);
10661         default:
10662                 goto drop;
10663         }
10664         switch (fc_hdr->fh_type) {
10665         case FC_TYPE_BLS:
10666         case FC_TYPE_ELS:
10667         case FC_TYPE_FCP:
10668         case FC_TYPE_CT:
10669                 break;
10670         case FC_TYPE_IP:
10671         case FC_TYPE_ILS:
10672         default:
10673                 goto drop;
10674         }
10675         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10676                         "2538 Received frame rctl:%s type:%s\n",
10677                         rctl_names[fc_hdr->fh_r_ctl],
10678                         type_names[fc_hdr->fh_type]);
10679         return 0;
10680 drop:
10681         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
10682                         "2539 Dropped frame rctl:%s type:%s\n",
10683                         rctl_names[fc_hdr->fh_r_ctl],
10684                         type_names[fc_hdr->fh_type]);
10685         return 1;
10686 }
10687
/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if a Virtual
 * Fabric Tagging (VFT) header exists, or 0 if no VFT header exists.
 **/
10696 static uint32_t
10697 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
10698 {
10699         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10700
10701         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
10702                 return 0;
10703         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
10704 }
10705
/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FCF index (FCFI) that the frame was received on
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match frame to a
 * vport.
 **/
10718 static struct lpfc_vport *
10719 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10720                        uint16_t fcfi)
10721 {
10722         struct lpfc_vport **vports;
10723         struct lpfc_vport *vport = NULL;
10724         int i;
10725         uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
10726                         fc_hdr->fh_d_id[1] << 8 |
10727                         fc_hdr->fh_d_id[2]);
10728
10729         vports = lpfc_create_vport_work_array(phba);
10730         if (vports != NULL)
10731                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
10732                         if (phba->fcf.fcfi == fcfi &&
10733                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
10734                             vports[i]->fc_myDID == did) {
10735                                 vport = vports[i];
10736                                 break;
10737                         }
10738                 }
10739         lpfc_destroy_vport_work_array(phba, vports);
10740         return vport;
10741 }
10742
/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
10753 void
10754 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
10755 {
10756         struct lpfc_dmabuf *h_buf;
10757         struct hbq_dmabuf *dmabuf = NULL;
10758
10759         /* get the oldest sequence on the rcv list */
10760         h_buf = list_get_first(&vport->rcv_buffer_list,
10761                                struct lpfc_dmabuf, list);
10762         if (!h_buf)
10763                 return;
10764         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10765         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
10766 }
10767
10768 /**
10769  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
10770  * @vport: The vport that the received sequences were sent to.
10771  *
10772  * This function cleans up all outstanding received sequences. This is called
10773  * by the driver when a link event or user action invalidates all the received
10774  * sequences.
10775  **/
10776 void
10777 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
10778 {
10779         struct lpfc_dmabuf *h_buf, *hnext;
10780         struct lpfc_dmabuf *d_buf, *dnext;
10781         struct hbq_dmabuf *dmabuf = NULL;
10782
10783         /* start with the oldest sequence on the rcv list */
10784         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
10785                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10786                 list_del_init(&dmabuf->hbuf.list);
10787                 list_for_each_entry_safe(d_buf, dnext,
10788                                          &dmabuf->dbuf.list, list) {
10789                         list_del_init(&d_buf->list);
10790                         lpfc_in_buf_free(vport->phba, d_buf);
10791                 }
10792                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
10793         }
10794 }
10795
10796 /**
10797  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
10798  * @vport: The vport that the received sequences were sent to.
10799  *
10800  * This function determines whether any received sequences have timed out by
10801  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
10802  * indicates that there is at least one timed out sequence this routine will
10803  * go through the received sequences one at a time from most inactive to most
10804  * active to determine which ones need to be cleaned up. Once it has determined
10805  * that a sequence needs to be cleaned up it will simply free up the resources
10806  * without sending an abort.
10807  **/
10808 void
10809 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
10810 {
10811         struct lpfc_dmabuf *h_buf, *hnext;
10812         struct lpfc_dmabuf *d_buf, *dnext;
10813         struct hbq_dmabuf *dmabuf = NULL;
10814         unsigned long timeout;
10815         int abort_count = 0;
10816
10817         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
10818                    vport->rcv_buffer_time_stamp);
10819         if (list_empty(&vport->rcv_buffer_list) ||
10820             time_before(jiffies, timeout))
10821                 return;
10822         /* start with the oldest sequence on the rcv list */
10823         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
10824                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10825                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
10826                            dmabuf->time_stamp);
10827                 if (time_before(jiffies, timeout))
10828                         break;
10829                 abort_count++;
10830                 list_del_init(&dmabuf->hbuf.list);
10831                 list_for_each_entry_safe(d_buf, dnext,
10832                                          &dmabuf->dbuf.list, list) {
10833                         list_del_init(&d_buf->list);
10834                         lpfc_in_buf_free(vport->phba, d_buf);
10835                 }
10836                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
10837         }
10838         if (abort_count)
10839                 lpfc_update_rcv_time_stamp(vport);
10840 }
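
/*
 * A note on the timeout test above (illustrative): it is the standard
 * jiffies idiom -- a sequence is considered stale once
 *
 *	time_after(jiffies, time_stamp + msecs_to_jiffies(phba->fc_edtov))
 *
 * holds, which is what the time_before(jiffies, timeout) checks express
 * in their inverted form.
 */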
10841
/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport on which the frame was received.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
10854 static struct hbq_dmabuf *
10855 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10856 {
10857         struct fc_frame_header *new_hdr;
10858         struct fc_frame_header *temp_hdr;
10859         struct lpfc_dmabuf *d_buf;
10860         struct lpfc_dmabuf *h_buf;
10861         struct hbq_dmabuf *seq_dmabuf = NULL;
10862         struct hbq_dmabuf *temp_dmabuf = NULL;
10863
10864         INIT_LIST_HEAD(&dmabuf->dbuf.list);
10865         dmabuf->time_stamp = jiffies;
10866         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10867         /* Use the hdr_buf to find the sequence that this frame belongs to */
10868         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
10869                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
10870                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
10871                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
10872                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
10873                         continue;
10874                 /* found a pending sequence that matches this frame */
10875                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10876                 break;
10877         }
10878         if (!seq_dmabuf) {
10879                 /*
10880                  * This indicates first frame received for this sequence.
10881                  * Queue the buffer on the vport's rcv_buffer_list.
10882                  */
10883                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10884                 lpfc_update_rcv_time_stamp(vport);
10885                 return dmabuf;
10886         }
10887         temp_hdr = seq_dmabuf->hbuf.virt;
10888         if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
10889                 list_del_init(&seq_dmabuf->hbuf.list);
10890                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10891                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
10892                 lpfc_update_rcv_time_stamp(vport);
10893                 return dmabuf;
10894         }
10895         /* move this sequence to the tail to indicate a young sequence */
10896         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
10897         seq_dmabuf->time_stamp = jiffies;
10898         lpfc_update_rcv_time_stamp(vport);
10899         /* find the correct place in the sequence to insert this frame */
10900         list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10901                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10902                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
10903                 /*
10904                  * If the frame's sequence count is greater than the frame on
10905                  * the list then insert the frame right after this frame
10906                  */
10907                 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
10908                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10909                         return seq_dmabuf;
10910                 }
10911         }
10912         return NULL;
10913 }
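
/*
 * Usage sketch (illustrative only): the receive path links each new frame
 * into its sequence and then checks whether the sequence is now complete:
 *
 *	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
 *	if (seq_dmabuf && lpfc_seq_complete(seq_dmabuf))
 *		lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
 */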
10914
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort a partially assembled sequence, described by
 * the information in the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it frees up all
 * the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if a matching partially assembled sequence is present and all
 *          the frames are freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
10931 static bool
10932 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
10933                             struct hbq_dmabuf *dmabuf)
10934 {
10935         struct fc_frame_header *new_hdr;
10936         struct fc_frame_header *temp_hdr;
10937         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
10938         struct hbq_dmabuf *seq_dmabuf = NULL;
10939
10940         /* Use the hdr_buf to find the sequence that matches this frame */
10941         INIT_LIST_HEAD(&dmabuf->dbuf.list);
10942         INIT_LIST_HEAD(&dmabuf->hbuf.list);
10943         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10944         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
10945                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
10946                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
10947                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
10948                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
10949                         continue;
10950                 /* found a pending sequence that matches this frame */
10951                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10952                 break;
10953         }
10954
10955         /* Free up all the frames from the partially assembled sequence */
10956         if (seq_dmabuf) {
10957                 list_for_each_entry_safe(d_buf, n_buf,
10958                                          &seq_dmabuf->dbuf.list, list) {
10959                         list_del_init(&d_buf->list);
10960                         lpfc_in_buf_free(vport->phba, d_buf);
10961                 }
10962                 return true;
10963         }
10964         return false;
10965 }
10966
10967 /**
10968  * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
10969  * @phba: Pointer to HBA context object.
10970  * @cmd_iocbq: pointer to the command iocbq structure.
10971  * @rsp_iocbq: pointer to the response iocbq structure.
10972  *
10973  * This function handles the sequence abort accept iocb command complete
10974  * event. It properly releases the memory allocated to the sequence abort
10975  * accept iocb.
10976  **/
10977 static void
10978 lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
10979                              struct lpfc_iocbq *cmd_iocbq,
10980                              struct lpfc_iocbq *rsp_iocbq)
10981 {
10982         if (cmd_iocbq)
10983                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
10984 }
10985
10986 /**
10987  * lpfc_sli4_seq_abort_acc - Accept sequence abort
10988  * @phba: Pointer to HBA context object.
10989  * @fc_hdr: pointer to a FC frame header.
10990  *
10991  * This function sends a basic accept to a previous unsol sequence abort
10992  * event after aborting the sequence handling.
10993  **/
10994 static void
10995 lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
10996                         struct fc_frame_header *fc_hdr)
10997 {
10998         struct lpfc_iocbq *ctiocb = NULL;
10999         struct lpfc_nodelist *ndlp;
11000         uint16_t oxid;
11001         uint32_t sid;
11002         IOCB_t *icmd;
11003
11004         if (!lpfc_is_link_up(phba))
11005                 return;
11006
11007         sid = sli4_sid_from_fc_hdr(fc_hdr);
11008         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
11009
11010         ndlp = lpfc_findnode_did(phba->pport, sid);
11011         if (!ndlp) {
11012                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
11013                                 "1268 Find ndlp returned NULL for oxid:x%x "
11014                                 "SID:x%x\n", oxid, sid);
11015                 return;
11016         }
11017
11018         /* Allocate buffer for acc iocb */
11019         ctiocb = lpfc_sli_get_iocbq(phba);
11020         if (!ctiocb)
11021                 return;
11022
11023         icmd = &ctiocb->iocb;
11024         icmd->un.xseq64.bdl.ulpIoTag32 = 0;
11025         icmd->un.xseq64.bdl.bdeSize = 0;
11026         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11027         icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
11028         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
11029
11030         /* Fill in the rest of iocb fields */
11031         icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
11032         icmd->ulpBdeCount = 0;
11033         icmd->ulpLe = 1;
11034         icmd->ulpClass = CLASS3;
11035         icmd->ulpContext = ndlp->nlp_rpi;
11036         icmd->un.ulpWord[3] = oxid;
11037
        ctiocb->sli4_xritag = NO_XRI;
        ctiocb->vport = phba->pport;
        ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
11042
11043         /* Xmit CT abts accept on exchange <xid> */
11044         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11045                         "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
11046                         CMD_XMIT_BLS_RSP64_CX, phba->link_state);
11047         lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
11048 }
11049
/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-OX_ID status to
 * indicate that the unsolicited sequence has been aborted. After that, it
 * will issue a basic accept to accept the abort.
 **/
11063 void
11064 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
11065                              struct hbq_dmabuf *dmabuf)
11066 {
11067         struct lpfc_hba *phba = vport->phba;
11068         struct fc_frame_header fc_hdr;
11069         bool abts_par;
11070
11071         /* Try to abort partially assembled seq */
11072         abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
11073
11074         /* Make a copy of fc_hdr before the dmabuf being released */
11075         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
11076
        /* Send the abort to the ULP if the partial sequence abort failed */
        if (!abts_par)
11079                 lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
11080         else
11081                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11082         /* Send basic accept (BA_ACC) to the abort requester */
11083         lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
11084 }
11085
/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three things: 1) That the first frame
 * has a sequence count of zero. 2) That there is a frame with the last frame
 * of sequence bit set. 3) That there are no holes in the sequence count. The
 * function will return 1 when the sequence is complete, otherwise it will
 * return 0.
 **/
11098 static int
11099 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
11100 {
11101         struct fc_frame_header *hdr;
11102         struct lpfc_dmabuf *d_buf;
11103         struct hbq_dmabuf *seq_dmabuf;
11104         uint32_t fctl;
11105         int seq_count = 0;
11106
11107         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
        /* make sure first frame of sequence has a sequence count of zero */
11109         if (hdr->fh_seq_cnt != seq_count)
11110                 return 0;
11111         fctl = (hdr->fh_f_ctl[0] << 16 |
11112                 hdr->fh_f_ctl[1] << 8 |
11113                 hdr->fh_f_ctl[2]);
11114         /* If last frame of sequence we can return success. */
11115         if (fctl & FC_FC_END_SEQ)
11116                 return 1;
11117         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
11118                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
11119                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11120                 /* If there is a hole in the sequence count then fail. */
11121                 if (++seq_count != hdr->fh_seq_cnt)
11122                         return 0;
11123                 fctl = (hdr->fh_f_ctl[0] << 16 |
11124                         hdr->fh_f_ctl[1] << 8 |
11125                         hdr->fh_f_ctl[2]);
11126                 /* If last frame of sequence we can return success. */
11127                 if (fctl & FC_FC_END_SEQ)
11128                         return 1;
11129         }
11130         return 0;
11131 }
11132
/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that it could
 * not describe and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
11146 static struct lpfc_iocbq *
11147 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11148 {
11149         struct lpfc_dmabuf *d_buf, *n_buf;
11150         struct lpfc_iocbq *first_iocbq, *iocbq;
11151         struct fc_frame_header *fc_hdr;
11152         uint32_t sid;
11153
11154         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11155         /* remove from receive buffer list */
11156         list_del_init(&seq_dmabuf->hbuf.list);
11157         lpfc_update_rcv_time_stamp(vport);
11158         /* get the Remote Port's SID */
11159         sid = sli4_sid_from_fc_hdr(fc_hdr);
11160         /* Get an iocbq struct to fill in. */
11161         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
11162         if (first_iocbq) {
11163                 /* Initialize the first IOCB. */
11164                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
11165                 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
11166                 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
11167                 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
11168                 first_iocbq->iocb.unsli3.rcvsli3.vpi =
11169                                         vport->vpi + vport->phba->vpi_base;
11170                 /* put the first buffer into the first IOCBq */
11171                 first_iocbq->context2 = &seq_dmabuf->dbuf;
11172                 first_iocbq->context3 = NULL;
11173                 first_iocbq->iocb.ulpBdeCount = 1;
11174                 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
11175                                                         LPFC_DATA_BUF_SIZE;
11176                 first_iocbq->iocb.un.rcvels.remoteID = sid;
11177                 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
11178                                 bf_get(lpfc_rcqe_length,
11179                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
11180         }
11181         iocbq = first_iocbq;
11182         /*
11183          * Each IOCBq can have two Buffers assigned, so go through the list
11184          * of buffers for this sequence and save two buffers in each IOCBq
11185          */
11186         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
11187                 if (!iocbq) {
11188                         lpfc_in_buf_free(vport->phba, d_buf);
11189                         continue;
11190                 }
11191                 if (!iocbq->context3) {
11192                         iocbq->context3 = d_buf;
11193                         iocbq->iocb.ulpBdeCount++;
11194                         iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
11195                                                         LPFC_DATA_BUF_SIZE;
11196                         first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
11197                                 bf_get(lpfc_rcqe_length,
11198                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
11199                 } else {
11200                         iocbq = lpfc_sli_get_iocbq(vport->phba);
11201                         if (!iocbq) {
11202                                 if (first_iocbq) {
11203                                         first_iocbq->iocb.ulpStatus =
11204                                                         IOSTAT_FCP_RSP_ERROR;
11205                                         first_iocbq->iocb.un.ulpWord[4] =
11206                                                         IOERR_NO_RESOURCES;
11207                                 }
11208                                 lpfc_in_buf_free(vport->phba, d_buf);
11209                                 continue;
11210                         }
11211                         iocbq->context2 = d_buf;
11212                         iocbq->context3 = NULL;
11213                         iocbq->iocb.ulpBdeCount = 1;
11214                         iocbq->iocb.un.cont64[0].tus.f.bdeSize =
11215                                                         LPFC_DATA_BUF_SIZE;
11216                         first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
11217                                 bf_get(lpfc_rcqe_length,
11218                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
11219                         iocbq->iocb.un.rcvels.remoteID = sid;
11220                         list_add_tail(&iocbq->list, &first_iocbq->list);
11221                 }
11222         }
11223         return first_iocbq;
11224 }
11225
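/**
 * lpfc_sli4_send_seq_to_ulp - Send a completed sequence to the ULP
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to the first dmabuf of the completed sequence
 *
 * This routine builds an iocbq list for the sequence with lpfc_prep_seq(),
 * hands the list to the unsolicited iocb handler on the ELS ring, and then
 * releases the iocbqs that were created for the sequence.
 **/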
11226 static void
11227 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
11228                           struct hbq_dmabuf *seq_dmabuf)
11229 {
11230         struct fc_frame_header *fc_hdr;
11231         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
11232         struct lpfc_hba *phba = vport->phba;
11233
11234         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11235         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
11236         if (!iocbq) {
11237                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11238                                 "2707 Ring %d handler: Failed to allocate "
11239                                 "iocb Rctl x%x Type x%x received\n",
11240                                 LPFC_ELS_RING,
11241                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11242                 return;
11243         }
11244         if (!lpfc_complete_unsol_iocb(phba,
11245                                       &phba->sli.ring[LPFC_ELS_RING],
11246                                       iocbq, fc_hdr->fh_r_ctl,
11247                                       fc_hdr->fh_type))
11248                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11249                                 "2540 Ring %d handler: unexpected Rctl "
11250                                 "x%x Type x%x received\n",
11251                                 LPFC_ELS_RING,
11252                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11253
11254         /* Free iocb created in lpfc_prep_seq */
11255         list_for_each_entry_safe(curr_iocb, next_iocb,
11256                 &iocbq->list, list) {
11257                 list_del_init(&curr_iocb->list);
11258                 lpfc_sli_release_iocbq(phba, curr_iocb);
11259         }
11260         lpfc_sli_release_iocbq(phba, iocbq);
11261 }
11262
11263 /**
11264  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
11265  * @phba: Pointer to HBA context object.
11266  * @dmabuf: Pointer to the dma buffer that holds the received frame.
11267  *
11268  * This function is called with no lock held. It processes received buffers
11269  * and hands a completed sequence to the upper layer when a received buffer
11270  * indicates the final frame in the sequence. The interrupt service routine
11271  * processes received buffers at interrupt context, adds the dma buffers to
11272  * the rb_pend_list queue, and signals the worker thread, which calls this
11273  * function to invoke the appropriate receive handler for the sequence.
11274  **/
11275 void
11276 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
11277                                  struct hbq_dmabuf *dmabuf)
11278 {
11279         struct hbq_dmabuf *seq_dmabuf;
11280         struct fc_frame_header *fc_hdr;
11281         struct lpfc_vport *vport;
11282         uint32_t fcfi;
11283
11284         /* Process each received buffer */
11285         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11286         /* Check to see if this is a valid type of frame */
11287         if (lpfc_fc_frame_check(phba, fc_hdr)) {
11288                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11289                 return;
11290         }
11291         fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
11292         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
11293         if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
11294                 /* throw out the frame */
11295                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11296                 return;
11297         }
11298         /* Handle the basic abort sequence (BA_ABTS) event */
11299         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
11300                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
11301                 return;
11302         }
11303
11304         /* Link this frame */
11305         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
11306         if (!seq_dmabuf) {
11307                 /* unable to add frame to vport - throw it out */
11308                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11309                 return;
11310         }
11311         /* If not the last frame in the sequence, continue processing. */
11312         if (!lpfc_seq_complete(seq_dmabuf)) {
11313                 /*
11314                  * When saving off frames, post a replacement buffer and mark
11315                  * this frame to be freed when processing is finished.
11316                  */
11317                 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
11318                 dmabuf->tag = -1;
11319                 return;
11320         }
11321         /* Send the complete sequence to the upper layer protocol */
11322         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
11323 }
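
/*
 * For reference, the per-frame flow implemented above (summary derived from
 * the code, not a normative description) is:
 *
 *   lpfc_fc_frame_check()    - validate the frame header
 *   lpfc_fc_frame_to_vport() - bind the frame to a registered vport
 *   FC_RCTL_BA_ABTS          - routed to lpfc_sli4_handle_unsol_abort()
 *   lpfc_fc_frame_add()      - link the frame into its pending sequence
 *   lpfc_seq_complete()      - if the sequence is complete, hand it to
 *                              lpfc_sli4_send_seq_to_ulp(); otherwise post
 *                              a replacement buffer and keep waiting.
 */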
11324
11325 /**
11326  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
11327  * @phba: pointer to lpfc hba data structure.
11328  *
11329  * This routine is invoked to post rpi header templates to the
11330  * HBA consistent with the SLI-4 interface spec.  This routine
11331  * posts a PAGE_SIZE memory region to the port to hold up to
11332  * PAGE_SIZE / 64 rpi context headers.
11333  *
11334  * This routine does not require any locks.  Its usage is expected
11335  * to be at driver load or during reset recovery, when driver
11336  * execution is sequential.
11337  *
11338  * Return codes
11339  *      0 - successful
11340  *      EIO - The mailbox failed to complete successfully.
11341  *      When this error occurs, the driver is not guaranteed
11342  *      to have any rpi regions posted to the device and
11343  *      must either attempt to repost the regions or take a
11344  *      fatal error.
11345  **/
11346 int
11347 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
11348 {
11349         struct lpfc_rpi_hdr *rpi_page;
11350         uint32_t rc = 0;
11351
11352         /* Post all rpi memory regions to the port. */
11353         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
11354                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
11355                 if (rc != MBX_SUCCESS) {
11356                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11357                                         "2008 Error %d posting all rpi "
11358                                         "headers\n", rc);
11359                         rc = -EIO;
11360                         break;
11361                 }
11362         }
11363
11364         return rc;
11365 }
11366
11367 /**
11368  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
11369  * @phba: pointer to lpfc hba data structure.
11370  * @rpi_page:  pointer to the rpi memory region.
11371  *
11372  * This routine is invoked to post a single rpi header to the
11373  * HBA consistent with the SLI-4 interface spec.  This memory region
11374  * maps up to 64 rpi context regions.
11375  *
11376  * Return codes
11377  *      0 - successful
11378  *      ENOMEM - No available memory
11379  *      EIO - The mailbox failed to complete successfully.
11380  **/
11381 int
11382 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
11383 {
11384         LPFC_MBOXQ_t *mboxq;
11385         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
11386         uint32_t rc = 0;
11387         uint32_t mbox_tmo;
11388         uint32_t shdr_status, shdr_add_status;
11389         union lpfc_sli4_cfg_shdr *shdr;
11390
11391         /* The port is notified of the header region via a mailbox command. */
11392         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11393         if (!mboxq) {
11394                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11395                                 "2001 Unable to allocate memory for issuing "
11396                                 "SLI_CONFIG_SPECIAL mailbox command\n");
11397                 return -ENOMEM;
11398         }
11399
11400         /* Post this rpi header region to the port. */
11401         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
11402         mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
11403         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11404                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
11405                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
11406                          sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
11407         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
11408                hdr_tmpl, rpi_page->page_count);
11409         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
11410                rpi_page->start_rpi);
11411         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11412         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11413         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11414         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11415         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11416         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11417         if (rc != MBX_TIMEOUT)
11418                 mempool_free(mboxq, phba->mbox_mem_pool);
11419         if (shdr_status || shdr_add_status || rc) {
11420                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11421                                 "2514 POST_RPI_HDR mailbox failed with "
11422                                 "status x%x add_status x%x, mbx status x%x\n",
11423                                 shdr_status, shdr_add_status, rc);
11424                 rc = -ENXIO;
11425         }
11426         return rc;
11427 }
11428
11429 /**
11430  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
11431  * @phba: pointer to lpfc hba data structure.
11432  *
11433  * This routine is invoked to allocate an rpi from the driver's rpi
11434  * bitmask within the range reported by the port.  If rpi resources
11435  * run low, it also creates and posts another rpi header page so the
11436  * usable rpi count can grow.
11437  *
11438  * Returns
11439  *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11440  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
11441  **/
11442 int
11443 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
11444 {
11445         int rpi;
11446         uint16_t max_rpi, rpi_base, rpi_limit;
11447         uint16_t rpi_remaining;
11448         struct lpfc_rpi_hdr *rpi_hdr;
11449
11450         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
11451         rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
11452         rpi_limit = phba->sli4_hba.next_rpi;
11453
11454         /*
11455          * The valid rpi range is not guaranteed to be zero-based.  Start
11456          * the search at the rpi_base as reported by the port.
11457          */
11458         spin_lock_irq(&phba->hbalock);
11459         rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
11460         if (rpi >= rpi_limit || rpi < rpi_base) {
11461                 rpi = LPFC_RPI_ALLOC_ERROR;
11462         } else {
11463                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
11464                 phba->sli4_hba.max_cfg_param.rpi_used++;
11465                 phba->sli4_hba.rpi_count++;
11466         }
11467
11468         /*
11469          * Don't try to allocate more rpi header regions if the device's
11470          * maximum count of available rpis has been exhausted.
11471          */
11472         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
11473             (phba->sli4_hba.rpi_count >= max_rpi)) {
11474                 spin_unlock_irq(&phba->hbalock);
11475                 return rpi;
11476         }
11477
11478         /*
11479          * If the driver is running low on rpi resources, allocate another
11480          * page now.  Note that the next_rpi value is used because
11481          * it reflects how many rpis the driver has posted headers for,
11482          * whereas max_rpi is the maximum number the device supports.
11483          */
11484         rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
11485                 phba->sli4_hba.rpi_count;
11486         spin_unlock_irq(&phba->hbalock);
11487         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
11488                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
11489                 if (!rpi_hdr) {
11490                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11491                                         "2002 Error Could not grow rpi "
11492                                         "count\n");
11493                 } else {
11494                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
11495                 }
11496         }
11497
11498         return rpi;
11499 }
11500
11501 /**
11502  * lpfc_sli4_free_rpi - Release an rpi for reuse.
11503  * @phba: pointer to lpfc hba data structure.
11504  *
11505  * This routine is invoked to release an rpi to the pool of
11506  * available rpis maintained by the driver.
11507  **/
11508 void
11509 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11510 {
11511         spin_lock_irq(&phba->hbalock);
11512         clear_bit(rpi, phba->sli4_hba.rpi_bmask);
11513         phba->sli4_hba.rpi_count--;
11514         phba->sli4_hba.max_cfg_param.rpi_used--;
11515         spin_unlock_irq(&phba->hbalock);
11516 }
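
/*
 * A minimal caller sketch (hypothetical, for illustration only): pair
 * lpfc_sli4_alloc_rpi() with lpfc_sli4_free_rpi() around a remote port
 * login.  Both helpers are defined above; the registration step in the
 * middle is elided.
 */
static int lpfc_example_rpi_cycle(struct lpfc_hba *phba)
{
	int rpi;

	rpi = lpfc_sli4_alloc_rpi(phba);
	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return -ENOSPC;	/* the port's rpi range is exhausted */

	/* ... register the remote port login against this rpi ... */

	lpfc_sli4_free_rpi(phba, rpi);	/* return the rpi to the pool */
	return 0;
}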
11517
11518 /**
11519  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
11520  * @phba: pointer to lpfc hba data structure.
11521  *
11522  * This routine is invoked to free the memory region that backs
11523  * the driver's rpi bitmask.
11524  **/
11525 void
11526 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
11527 {
11528         kfree(phba->sli4_hba.rpi_bmask);
11529 }
11530
11531 /**
11532  * lpfc_sli4_resume_rpi - Resume rpi traffic for a remote node
11533  * @ndlp: pointer to the lpfc nodelist data structure.
11534  *
11535  * This routine is invoked to issue a RESUME_RPI mailbox command so the
11536  * port resumes processing for the rpi bound to the given remote node.
11537  **/
11538 int
11539 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
11540 {
11541         LPFC_MBOXQ_t *mboxq;
11542         struct lpfc_hba *phba = ndlp->phba;
11543         int rc;
11544
11545         /* The port is notified of the resume request via a mailbox command. */
11546         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11547         if (!mboxq)
11548                 return -ENOMEM;
11549
11550         /* Build and issue the RESUME_RPI mailbox command. */
11551         lpfc_resume_rpi(mboxq, ndlp);
11552         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11553         if (rc == MBX_NOT_FINISHED) {
11554                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11555                                 "2010 Resume RPI Mailbox failed "
11556                                 "status %d, mbxStatus x%x\n", rc,
11557                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11558                 mempool_free(mboxq, phba->mbox_mem_pool);
11559                 return -EIO;
11560         }
11561         return 0;
11562 }
11563
11564 /**
11565  * lpfc_sli4_init_vpi - Initialize a vpi with the port
11566  * @phba: pointer to lpfc hba data structure.
11567  * @vpi: vpi value to activate with the port.
11568  *
11569  * This routine is invoked to activate a vpi with the
11570  * port when the host intends to use vports with a
11571  * nonzero vpi.
11572  *
11573  * Returns:
11574  *    0 success
11575  *    -Evalue otherwise
11576  **/
11577 int
11578 lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11579 {
11580         LPFC_MBOXQ_t *mboxq;
11581         int rc = 0;
11582         int retval = 0;
11583         uint32_t mbox_tmo;
11584
11585         if (vpi == 0)
11586                 return -EINVAL;
11587         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11588         if (!mboxq)
11589                 return -ENOMEM;
11590         lpfc_init_vpi(phba, mboxq, vpi);
11591         mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11592         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11593         if (rc != MBX_SUCCESS) {
11594                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11595                                 "2022 INIT VPI Mailbox failed "
11596                                 "status %d, mbxStatus x%x\n", rc,
11597                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11598                 retval = -EIO;
11599         }
11600         if (rc != MBX_TIMEOUT)
11601                 mempool_free(mboxq, phba->mbox_mem_pool);
11602
11603         return retval;
11604 }
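
/*
 * Hypothetical usage sketch: a vport's vpi must be activated with its
 * absolute value, i.e. the vport-relative vpi plus the port's vpi_base
 * (the same arithmetic used in lpfc_prep_seq() above).
 */
static int lpfc_example_activate_vpi(struct lpfc_vport *vport)
{
	return lpfc_sli4_init_vpi(vport->phba,
				  vport->vpi + vport->phba->vpi_base);
}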
11605
11606 /**
11607  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
11608  * @phba: pointer to lpfc hba data structure.
11609  * @mboxq: Pointer to mailbox object.
11610  *
11611  * This routine is the completion handler for the ADD_FCF mailbox command.
11612  * It checks the status in the mailbox subheader, logs any failure other
11613  * than STATUS_FCF_IN_USE, and frees the nonembedded mailbox resources.
11614  **/
11615 static void
11616 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11617 {
11618         void *virt_addr;
11619         union lpfc_sli4_cfg_shdr *shdr;
11620         uint32_t shdr_status, shdr_add_status;
11621
11622         virt_addr = mboxq->sge_array->addr[0];
11623         /* The IOCTL status is embedded in the mailbox subheader. */
11624         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
11625         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11626         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11627
11628         if ((shdr_status || shdr_add_status) &&
11629                 (shdr_status != STATUS_FCF_IN_USE))
11630                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11631                         "2558 ADD_FCF_RECORD mailbox failed with "
11632                         "status x%x add_status x%x\n",
11633                         shdr_status, shdr_add_status);
11634
11635         lpfc_sli4_mbox_cmd_free(phba, mboxq);
11636 }
11637
11638 /**
11639  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
11640  * @phba: pointer to lpfc hba data structure.
11641  * @fcf_record:  pointer to the initialized fcf record to add.
11642  *
11643  * This routine is invoked to manually add a single FCF record. The caller
11644  * must pass a completely initialized FCF_Record.  This routine takes
11645  * care of the nonembedded mailbox operations.
11646  **/
11647 int
11648 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11649 {
11650         int rc = 0;
11651         LPFC_MBOXQ_t *mboxq;
11652         uint8_t *bytep;
11653         void *virt_addr;
11654         dma_addr_t phys_addr;
11655         struct lpfc_mbx_sge sge;
11656         uint32_t alloc_len, req_len;
11657         uint32_t fcfindex;
11658
11659         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11660         if (!mboxq) {
11661                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11662                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
11663                 return -ENOMEM;
11664         }
11665
11666         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
11667                   sizeof(uint32_t);
11668
11669         /* Allocate DMA memory and set up the non-embedded mailbox command */
11670         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11671                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
11672                                      req_len, LPFC_SLI4_MBX_NEMBED);
11673         if (alloc_len < req_len) {
11674                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11675                         "2523 Allocated DMA memory size (x%x) is "
11676                         "less than the requested DMA memory "
11677                         "size (x%x)\n", alloc_len, req_len);
11678                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11679                 return -ENOMEM;
11680         }
11681
11682         /*
11683          * Get the first SGE entry from the non-embedded DMA memory.  This
11684          * routine only uses a single SGE.
11685          */
11686         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11687         phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11688         virt_addr = mboxq->sge_array->addr[0];
11689         /*
11690          * Configure the FCF record for FCFI 0.  This is the driver's
11691          * hardcoded default and gets used in nonFIP mode.
11692          */
11693         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
11694         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11695         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
11696
11697         /*
11698          * Copy the fcf_index and the FCF Record Data. The data starts after
11699          * the FCoE header plus word10. The data copy needs to be endian
11700          * correct.
11701          */
11702         bytep += sizeof(uint32_t);
11703         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
11704         mboxq->vport = phba->pport;
11705         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
11706         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11707         if (rc == MBX_NOT_FINISHED) {
11708                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11709                         "2515 ADD_FCF_RECORD mailbox failed with "
11710                         "status 0x%x\n", rc);
11711                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11712                 rc = -EIO;
11713         } else
11714                 rc = 0;
11715
11716         return rc;
11717 }
11718
11719 /**
11720  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
11721  * @phba: pointer to lpfc hba data structure.
11722  * @fcf_record:  pointer to the fcf record to write the default data.
11723  * @fcf_index: FCF table entry index.
11724  *
11725  * This routine is invoked to build the driver's default FCF record.  The
11726  * values used are hardcoded.  This routine handles memory initialization.
11727  *
11728  **/
11729 void
11730 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11731                                 struct fcf_record *fcf_record,
11732                                 uint16_t fcf_index)
11733 {
11734         memset(fcf_record, 0, sizeof(struct fcf_record));
11735         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
11736         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
11737         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
11738         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
11739         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
11740         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
11741         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
11742         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
11743         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
11744         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
11745         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
11746         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
11747         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
11748         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
11749         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
11750         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
11751                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
11752         /* Set the VLAN bit map */
11753         if (phba->valid_vlan) {
11754                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
11755                         = 1 << (phba->vlan_id % 8);
11756         }
11757 }
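
/*
 * Sketch of how the two FCF helpers above compose (hypothetical caller):
 * build the driver's hardcoded default record for FCF index 0 and post it
 * to the port.  Index 0 is the driver default used in nonFIP mode, per the
 * comment in lpfc_sli4_add_fcf_record().
 */
static int lpfc_example_add_dflt_fcf(struct lpfc_hba *phba)
{
	struct fcf_record fcf_record;

	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
	return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}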
11758
11759 /**
11760  * lpfc_sli4_read_fcf_record - Read an FCF record from the port.
11761  * @phba: pointer to lpfc hba data structure.
11762  * @fcf_index: FCF table entry offset.
11763  *
11764  * This routine is invoked to read a single FCF record from the
11765  * device starting at the given @fcf_index.
11766  **/
11767 int
11768 lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11769 {
11770         int rc = 0, error;
11771         LPFC_MBOXQ_t *mboxq;
11772         void *virt_addr;
11773         dma_addr_t phys_addr;
11774         uint8_t *bytep;
11775         struct lpfc_mbx_sge sge;
11776         uint32_t alloc_len, req_len;
11777         struct lpfc_mbx_read_fcf_tbl *read_fcf;
11778
11779         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
11780         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11781         if (!mboxq) {
11782                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11783                                 "2000 Failed to allocate mbox for "
11784                                 "READ_FCF cmd\n");
11785                 error = -ENOMEM;
11786                 goto fail_fcfscan;
11787         }
11788
11789         req_len = sizeof(struct fcf_record) +
11790                   sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
11791
11792         /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
11793         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11794                          LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
11795                          LPFC_SLI4_MBX_NEMBED);
11796
11797         if (alloc_len < req_len) {
11798                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11799                                 "0291 Allocated DMA memory size (x%x) is "
11800                                 "less than the requested DMA memory "
11801                                 "size (x%x)\n", alloc_len, req_len);
11802                 error = -ENOMEM;
11803                 goto fail_fcfscan;
11804         }
11805
11806         /* Get the first SGE entry from the non-embedded DMA memory. This
11807          * routine only uses a single SGE.
11808          */
11809         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11810         phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11811         virt_addr = mboxq->sge_array->addr[0];
11812         read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11813
11814         /* Set up command fields */
11815         bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
11816         /* Perform necessary endian conversion */
11817         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11818         lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
11819         mboxq->vport = phba->pport;
11820         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11821         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11822         if (rc == MBX_NOT_FINISHED) {
11823                 error = -EIO;
11824         } else {
11825                 spin_lock_irq(&phba->hbalock);
11826                 phba->hba_flag |= FCF_DISC_INPROGRESS;
11827                 spin_unlock_irq(&phba->hbalock);
11828                 error = 0;
11829         }
11830 fail_fcfscan:
11831         if (error) {
11832                 if (mboxq)
11833                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
11834                 /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
11835                 spin_lock_irq(&phba->hbalock);
11836                 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
11837                 spin_unlock_irq(&phba->hbalock);
11838         }
11839         return error;
11840 }
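
/*
 * Hypothetical caller sketch: kick off an FCF table scan at the first
 * entry.  Completion is asynchronous; lpfc_mbx_cmpl_read_fcf_record,
 * installed by lpfc_sli4_read_fcf_record() above, runs when the mailbox
 * completes and drives the rest of FCF discovery.
 */
static int lpfc_example_start_fcf_scan(struct lpfc_hba *phba)
{
	return lpfc_sli4_read_fcf_record(phba, 0);
}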
11841
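/*
 * Layout of config region 23, as consumed by lpfc_sli_read_link_ste()
 * below (summary derived from the parsing code):
 *
 *   bytes 0-3 : region signature (LPFC_REGION23_SIGNATURE)
 *   bytes 4-7 : data structure version (LPFC_REGION23_VERSION)
 *   then TLV records until LPFC_REGION23_LAST_REC, each laid out as:
 *     byte 0  : type
 *     byte 1  : length in words (the record spans length * 4 + 4 bytes)
 *     byte 2+ : record data; for a PORT_STE_TYPE sub-TLV, a zero in
 *               byte 2 means the user disabled the link.
 */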
11842 /**
11843  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
11844  * @phba: pointer to lpfc hba data structure.
11845  *
11846  * This function reads region 23 and parses its TLVs for the port status
11847  * to decide whether the user disabled the port. If the TLV indicates the
11848  * port is disabled, the hba_flag is set accordingly.
11849  **/
11850 void
11851 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
11852 {
11853         LPFC_MBOXQ_t *pmb = NULL;
11854         MAILBOX_t *mb;
11855         uint8_t *rgn23_data = NULL;
11856         uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
11857         int rc;
11858
11859         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11860         if (!pmb) {
11861                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11862                         "2600 lpfc_sli_read_link_ste failed to"
11863                         " allocate mailbox memory\n");
11864                 goto out;
11865         }
11866         mb = &pmb->u.mb;
11867
11868         /* Get adapter Region 23 data */
11869         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
11870         if (!rgn23_data)
11871                 goto out;
11872
11873         do {
11874                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
11875                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
11876
11877                 if (rc != MBX_SUCCESS) {
11878                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11879                                 "2601 lpfc_sli_read_link_ste failed to"
11880                                 " read config region 23 rc 0x%x Status 0x%x\n",
11881                                 rc, mb->mbxStatus);
11882                         mb->un.varDmp.word_cnt = 0;
11883                 }
11884                 /*
11885                  * The dump may return a zero word count when finished, or
11886                  * when a mailbox error occurred; either way we are done.
11887                  */
11888                 if (mb->un.varDmp.word_cnt == 0)
11889                         break;
11890                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
11891                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
11892
11893                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
11894                         rgn23_data + offset,
11895                         mb->un.varDmp.word_cnt);
11896                 offset += mb->un.varDmp.word_cnt;
11897         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
11898
11899         data_size = offset;
11900         offset = 0;
11901
11902         if (!data_size)
11903                 goto out;
11904
11905         /* Check the region signature first */
11906         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
11907                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11908                         "2619 Config region 23 has bad signature\n");
11909                 goto out;
11910         }
11911         offset += 4;
11912
11913         /* Check the data structure version */
11914         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
11915                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11916                         "2620 Config region 23 has bad version\n");
11917                 goto out;
11918         }
11919         offset += 4;
11920
11921         /* Parse TLV entries in the region */
11922         while (offset < data_size) {
11923                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
11924                         break;
11925                 /*
11926                  * If this is not a driver specific TLV, or the driver id
11927                  * is not the linux driver id, skip the record.
11928                  */
11929                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
11930                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
11931                     (rgn23_data[offset + 3] != 0)) {
11932                         offset += rgn23_data[offset + 1] * 4 + 4;
11933                         continue;
11934                 }
11935
11936                 /* Driver found a driver specific TLV in the config region */
11937                 sub_tlv_len = rgn23_data[offset + 1] * 4;
11938                 offset += 4;
11939                 tlv_offset = 0;
11940
11941                 /*
11942                  * Search for configured port state sub-TLV.
11943                  */
11944                 while ((offset < data_size) &&
11945                         (tlv_offset < sub_tlv_len)) {
11946                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
11947                                 offset += 4;
11948                                 tlv_offset += 4;
11949                                 break;
11950                         }
11951                         if (rgn23_data[offset] != PORT_STE_TYPE) {
11952                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
11953                                 offset += rgn23_data[offset + 1] * 4 + 4;
11954                                 continue;
11955                         }
11956
11957                         /* This HBA contains PORT_STE configured */
11958                         if (!rgn23_data[offset + 2])
11959                                 phba->hba_flag |= LINK_DISABLED;
11960
11961                         goto out;
11962                 }
11963         }
11964 out:
11965         if (pmb)
11966                 mempool_free(pmb, phba->mbox_mem_pool);
11967         kfree(rgn23_data);
11968         return;
11969 }