[SCSI] be2iscsi: Fixing initialization of can_queue
drivers/scsi/be2iscsi/be_main.c
1 /**
2  * Copyright (C) 2005 - 2009 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/blkdev.h>
24 #include <linux/pci.h>
25 #include <linux/string.h>
26 #include <linux/kernel.h>
27 #include <linux/semaphore.h>
28
29 #include <scsi/libiscsi.h>
30 #include <scsi/scsi_transport_iscsi.h>
31 #include <scsi/scsi_transport.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi.h>
36 #include "be_main.h"
37 #include "be_iscsi.h"
38 #include "be_mgmt.h"
39
40 static unsigned int be_iopoll_budget = 10;
41 static unsigned int be_max_phys_size = 64;
42 static unsigned int enable_msix = 1;
43 static unsigned int ring_mode;
44
46 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
47 MODULE_AUTHOR("ServerEngines Corporation");
48 MODULE_LICENSE("GPL");
49 module_param(be_iopoll_budget, int, 0);
50 module_param(enable_msix, int, 0);
51 module_param(be_max_phys_size, uint, S_IRUGO);
52 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically "
53                                    "contiguous memory that can be allocated. "
54                                    "Range is 16 - 128");
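/*
 * Illustrative load of the parameters above (values are examples only, and
 * this assumes the driver is built as the be2iscsi module):
 *   modprobe be2iscsi enable_msix=1 be_max_phys_size=64
 */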
55
56 static int beiscsi_slave_configure(struct scsi_device *sdev)
57 {
58         blk_queue_max_segment_size(sdev->request_queue, 65536);
59         return 0;
60 }
61
62 /*------------------- PCI Driver operations and data ----------------- */
63 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
64         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
65         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
66         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
67         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
68         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
69         { 0 }
70 };
71 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
72
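/*
 * SCSI host template notes: can_queue caps the total commands the midlayer
 * keeps outstanding on this host, cmd_per_lun is the default per-LUN queue
 * depth, and sg_tablesize bounds the scatter-gather entries per command.
 */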
73 static struct scsi_host_template beiscsi_sht = {
74         .module = THIS_MODULE,
75         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
76         .proc_name = DRV_NAME,
77         .queuecommand = iscsi_queuecommand,
78         .eh_abort_handler = iscsi_eh_abort,
79         .change_queue_depth = iscsi_change_queue_depth,
80         .slave_configure = beiscsi_slave_configure,
81         .target_alloc = iscsi_target_alloc,
82         .eh_device_reset_handler = iscsi_eh_device_reset,
83         .eh_target_reset_handler = iscsi_eh_target_reset,
84         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
85         .can_queue = BE2_IO_DEPTH,
86         .this_id = -1,
87         .max_sectors = BEISCSI_MAX_SECTORS,
88         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
89         .use_clustering = ENABLE_CLUSTERING,
90 };
91
92 static struct scsi_transport_template *beiscsi_scsi_transport;
93
94 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
95 {
96         struct beiscsi_hba *phba;
97         struct Scsi_Host *shost;
98
99         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
100         if (!shost) {
101                 dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
102                         "iscsi_host_alloc failed\n");
103                 return NULL;
104         }
105         shost->dma_boundary = pcidev->dma_mask;
106         shost->max_id = BE2_MAX_SESSIONS;
107         shost->max_channel = 0;
108         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
109         shost->max_lun = BEISCSI_NUM_MAX_LUN;
110         shost->transportt = beiscsi_scsi_transport;
111         phba = iscsi_host_priv(shost);
112         memset(phba, 0, sizeof(*phba));
113         phba->shost = shost;
114         phba->pcidev = pci_dev_get(pcidev);
115         pci_set_drvdata(pcidev, phba);
116
117         if (iscsi_host_add(shost, &phba->pcidev->dev))
118                 goto free_devices;
119         return phba;
120
121 free_devices:
122         pci_dev_put(phba->pcidev);
123         iscsi_host_free(phba->shost);
124         return NULL;
125 }
126
127 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
128 {
129         if (phba->csr_va) {
130                 iounmap(phba->csr_va);
131                 phba->csr_va = NULL;
132         }
133         if (phba->db_va) {
134                 iounmap(phba->db_va);
135                 phba->db_va = NULL;
136         }
137         if (phba->pci_va) {
138                 iounmap(phba->pci_va);
139                 phba->pci_va = NULL;
140         }
141 }
142
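/*
 * Map the PCI BARs used by the controller: BAR 2 provides the CSR block,
 * BAR 4 the doorbell area (only the first 128KB is mapped), and BAR 1 the
 * region the driver accesses through ctrl->pcicfg.
 */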
143 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
144                                 struct pci_dev *pcidev)
145 {
146         u8 __iomem *addr;
147
148         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
149                                pci_resource_len(pcidev, 2));
150         if (addr == NULL)
151                 return -ENOMEM;
152         phba->ctrl.csr = addr;
153         phba->csr_va = addr;
154         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
155
156         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
157         if (addr == NULL)
158                 goto pci_map_err;
159         phba->ctrl.db = addr;
160         phba->db_va = addr;
161         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
162
163         addr = ioremap_nocache(pci_resource_start(pcidev, 1),
164                                pci_resource_len(pcidev, 1));
165         if (addr == NULL)
166                 goto pci_map_err;
167         phba->ctrl.pcicfg = addr;
168         phba->pci_va = addr;
169         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
170         return 0;
171
172 pci_map_err:
173         beiscsi_unmap_pci_function(phba);
174         return -ENOMEM;
175 }
176
177 static int beiscsi_enable_pci(struct pci_dev *pcidev)
178 {
179         int ret;
180
181         ret = pci_enable_device(pcidev);
182         if (ret) {
183                 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
184                         "failed. Returning -ENODEV\n");
185                 return ret;
186         }
187
188         pci_set_master(pcidev);
189         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
190                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
191                 if (ret) {
192                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
193                         pci_disable_device(pcidev);
194                         return ret;
195                 }
196         }
197         return 0;
198 }
199
200 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
201 {
202         struct be_ctrl_info *ctrl = &phba->ctrl;
203         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
204         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
205         int status = 0;
206
207         ctrl->pdev = pdev;
208         status = beiscsi_map_pci_bars(phba, pdev);
209         if (status)
210                 return status;
211         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
212         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
213                                                   mbox_mem_alloc->size,
214                                                   &mbox_mem_alloc->dma);
215         if (!mbox_mem_alloc->va) {
216                 beiscsi_unmap_pci_function(phba);
217                 status = -ENOMEM;
218                 return status;
219         }
220
221         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
222         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
223         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
224         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
225         spin_lock_init(&ctrl->mbox_lock);
226         spin_lock_init(&phba->ctrl.mcc_lock);
227         spin_lock_init(&phba->ctrl.mcc_cq_lock);
228
229         return status;
230 }
231
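/*
 * Derive per-controller resource counts from the firmware configuration:
 * the ICDs left after reserving one per connection plus the TMF and NOP-Out
 * slots become the I/O budget (ios_per_ctrl), and the EQ/CQ depths are sized
 * from commands-per-connection, rounded up to a multiple of 512 with a floor
 * of 1024 entries for the EQ.
 */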
232 static void beiscsi_get_params(struct beiscsi_hba *phba)
233 {
234         phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
235                                     - (phba->fw_config.iscsi_cid_count
236                                     + BE2_TMFS
237                                     + BE2_NOPOUT_REQ));
238         phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
239         phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
240         phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
241         phba->params.num_sge_per_io = BE2_SGE;
242         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
243         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
244         phba->params.eq_timer = 64;
245         phba->params.num_eq_entries =
246             (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
247                                     + BE2_TMFS) / 512) + 1) * 512;
248         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
249                                 ? 1024 : phba->params.num_eq_entries;
250         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
251                              phba->params.num_eq_entries);
252         phba->params.num_cq_entries =
253             (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
254                                     + BE2_TMFS) / 512) + 1) * 512;
255         phba->params.wrbs_per_cxn = 256;
256 }
257
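/*
 * Ring the event-queue doorbell: the 32-bit write encodes the EQ id, the
 * re-arm, clear-interrupt and event flags, and the number of EQ entries the
 * driver has consumed (num_processed).
 */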
258 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
259                            unsigned int id, unsigned int clr_interrupt,
260                            unsigned int num_processed,
261                            unsigned char rearm, unsigned char event)
262 {
263         u32 val = 0;
264         val |= id & DB_EQ_RING_ID_MASK;
265         if (rearm)
266                 val |= 1 << DB_EQ_REARM_SHIFT;
267         if (clr_interrupt)
268                 val |= 1 << DB_EQ_CLR_SHIFT;
269         if (event)
270                 val |= 1 << DB_EQ_EVNT_SHIFT;
271         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
272         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
273 }
274
275 /**
276  * be_isr_mcc - Interrupt handler for the MSI-X vector servicing MCC events.
277  * @irq: Not used
278  * @dev_id: Pointer to the be_eq_obj for this event queue
279  */
280 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
281 {
282         struct beiscsi_hba *phba;
283         struct be_eq_entry *eqe = NULL;
284         struct be_queue_info *eq;
285         struct be_queue_info *mcc;
286         unsigned int num_eq_processed;
287         struct be_eq_obj *pbe_eq;
288         unsigned long flags;
289
290         pbe_eq = dev_id;
291         eq = &pbe_eq->q;
292         phba =  pbe_eq->phba;
293         mcc = &phba->ctrl.mcc_obj.cq;
294         eqe = queue_tail_node(eq);
295         if (!eqe)
296                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
297
298         num_eq_processed = 0;
299
300         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
301                                 & EQE_VALID_MASK) {
302                 if (((eqe->dw[offsetof(struct amap_eq_entry,
303                      resource_id) / 32] &
304                      EQE_RESID_MASK) >> 16) == mcc->id) {
305                         spin_lock_irqsave(&phba->isr_lock, flags);
306                         phba->todo_mcc_cq = 1;
307                         spin_unlock_irqrestore(&phba->isr_lock, flags);
308                 }
309                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
310                 queue_tail_inc(eq);
311                 eqe = queue_tail_node(eq);
312                 num_eq_processed++;
313         }
314         if (phba->todo_mcc_cq)
315                 queue_work(phba->wq, &phba->work_cqs);
316         if (num_eq_processed)
317                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
318
319         return IRQ_HANDLED;
320 }
321
322 /**
323  * be_isr_msix - Interrupt handler for the per-CPU I/O MSI-X vectors.
324  * @irq: Not used
325  * @dev_id: Pointer to the be_eq_obj for this event queue
326  */
327 static irqreturn_t be_isr_msix(int irq, void *dev_id)
328 {
329         struct beiscsi_hba *phba;
330         struct be_eq_entry *eqe = NULL;
331         struct be_queue_info *eq;
332         struct be_queue_info *cq;
333         unsigned int num_eq_processed;
334         struct be_eq_obj *pbe_eq;
335         unsigned long flags;
336
337         pbe_eq = dev_id;
338         eq = &pbe_eq->q;
339         cq = pbe_eq->cq;
340         eqe = queue_tail_node(eq);
341         if (!eqe)
342                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
343
344         phba = pbe_eq->phba;
345         num_eq_processed = 0;
346         if (blk_iopoll_enabled) {
347                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
348                                         & EQE_VALID_MASK) {
349                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
350                                 blk_iopoll_sched(&pbe_eq->iopoll);
351
352                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
353                         queue_tail_inc(eq);
354                         eqe = queue_tail_node(eq);
355                         num_eq_processed++;
356                 }
357                 if (num_eq_processed)
358                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
359
360                 return IRQ_HANDLED;
361         } else {
362                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
363                                                 & EQE_VALID_MASK) {
364                         spin_lock_irqsave(&phba->isr_lock, flags);
365                         phba->todo_cq = 1;
366                         spin_unlock_irqrestore(&phba->isr_lock, flags);
367                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
368                         queue_tail_inc(eq);
369                         eqe = queue_tail_node(eq);
370                         num_eq_processed++;
371                 }
372                 if (phba->todo_cq)
373                         queue_work(phba->wq, &phba->work_cqs);
374
375                 if (num_eq_processed)
376                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
377
378                 return IRQ_HANDLED;
379         }
380 }
381
382 /**
383  * be_isr - Interrupt handler for legacy INTx; services MCC and I/O events.
384  * @irq: Not used
385  * @dev_id: Pointer to host adapter structure
386  */
387 static irqreturn_t be_isr(int irq, void *dev_id)
388 {
389         struct beiscsi_hba *phba;
390         struct hwi_controller *phwi_ctrlr;
391         struct hwi_context_memory *phwi_context;
392         struct be_eq_entry *eqe = NULL;
393         struct be_queue_info *eq;
394         struct be_queue_info *cq;
395         struct be_queue_info *mcc;
396         unsigned long flags, index;
397         unsigned int num_mcceq_processed, num_ioeq_processed;
398         struct be_ctrl_info *ctrl;
399         struct be_eq_obj *pbe_eq;
400         int isr;
401
402         phba = dev_id;
403         ctrl = &phba->ctrl;
404         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
405                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
406         if (!isr)
407                 return IRQ_NONE;
408
409         phwi_ctrlr = phba->phwi_ctrlr;
410         phwi_context = phwi_ctrlr->phwi_ctxt;
411         pbe_eq = &phwi_context->be_eq[0];
412
413         eq = &phwi_context->be_eq[0].q;
414         mcc = &phba->ctrl.mcc_obj.cq;
415         index = 0;
416         eqe = queue_tail_node(eq);
417         if (!eqe)
418                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
419
420         num_ioeq_processed = 0;
421         num_mcceq_processed = 0;
422         if (blk_iopoll_enabled) {
423                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
424                                         & EQE_VALID_MASK) {
425                         if (((eqe->dw[offsetof(struct amap_eq_entry,
426                              resource_id) / 32] &
427                              EQE_RESID_MASK) >> 16) == mcc->id) {
428                                 spin_lock_irqsave(&phba->isr_lock, flags);
429                                 phba->todo_mcc_cq = 1;
430                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
431                                 num_mcceq_processed++;
432                         } else {
433                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
434                                         blk_iopoll_sched(&pbe_eq->iopoll);
435                                 num_ioeq_processed++;
436                         }
437                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
438                         queue_tail_inc(eq);
439                         eqe = queue_tail_node(eq);
440                 }
441                 if (num_ioeq_processed || num_mcceq_processed) {
442                         if (phba->todo_mcc_cq)
443                                 queue_work(phba->wq, &phba->work_cqs);
444
445                         if ((num_mcceq_processed) && (!num_ioeq_processed))
446                                 hwi_ring_eq_db(phba, eq->id, 0,
447                                               (num_ioeq_processed +
448                                                num_mcceq_processed) , 1, 1);
449                         else
450                                 hwi_ring_eq_db(phba, eq->id, 0,
451                                                (num_ioeq_processed +
452                                                 num_mcceq_processed), 0, 1);
453
454                         return IRQ_HANDLED;
455                 } else
456                         return IRQ_NONE;
457         } else {
458                 cq = &phwi_context->be_cq[0];
459                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
460                                                 & EQE_VALID_MASK) {
461
462                         if (((eqe->dw[offsetof(struct amap_eq_entry,
463                              resource_id) / 32] &
464                              EQE_RESID_MASK) >> 16) != cq->id) {
465                                 spin_lock_irqsave(&phba->isr_lock, flags);
466                                 phba->todo_mcc_cq = 1;
467                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
468                         } else {
469                                 spin_lock_irqsave(&phba->isr_lock, flags);
470                                 phba->todo_cq = 1;
471                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
472                         }
473                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
474                         queue_tail_inc(eq);
475                         eqe = queue_tail_node(eq);
476                         num_ioeq_processed++;
477                 }
478                 if (phba->todo_cq || phba->todo_mcc_cq)
479                         queue_work(phba->wq, &phba->work_cqs);
480
481                 if (num_ioeq_processed) {
482                         hwi_ring_eq_db(phba, eq->id, 0,
483                                        num_ioeq_processed, 1, 1);
484                         return IRQ_HANDLED;
485                 } else
486                         return IRQ_NONE;
487         }
488 }
489
490 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
491 {
492         struct pci_dev *pcidev = phba->pcidev;
493         struct hwi_controller *phwi_ctrlr;
494         struct hwi_context_memory *phwi_context;
495         int ret, msix_vec, i = 0;
496         char desc[32];
497
498         phwi_ctrlr = phba->phwi_ctrlr;
499         phwi_context = phwi_ctrlr->phwi_ctxt;
500
501         if (phba->msix_enabled) {
502                 for (i = 0; i < phba->num_cpus; i++) {
503                         sprintf(desc, "beiscsi_msix_%04x", i);
504                         msix_vec = phba->msix_entries[i].vector;
505                         ret = request_irq(msix_vec, be_isr_msix, 0, desc,
506                                           &phwi_context->be_eq[i]);
507                 }
508                 msix_vec = phba->msix_entries[i].vector;
509                 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
510                                   &phwi_context->be_eq[i]);
511         } else {
512                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
513                                   "beiscsi", phba);
514                 if (ret) {
515                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs - "
516                                      "Failed to register irq\n");
517                         return ret;
518                 }
519         }
520         return 0;
521 }
522
523 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
524                            unsigned int id, unsigned int num_processed,
525                            unsigned char rearm, unsigned char event)
526 {
527         u32 val = 0;
528         val |= id & DB_CQ_RING_ID_MASK;
529         if (rearm)
530                 val |= 1 << DB_CQ_REARM_SHIFT;
531         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
532         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
533 }
534
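/*
 * Hand an unsolicited PDU to libiscsi: NOP-In data is dropped, login/text
 * responses get the libiscsi ITT restored from the driver's io_task, unknown
 * opcodes are only logged, and everything else is completed through
 * __iscsi_complete_pdu() under the session lock.
 */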
535 static unsigned int
536 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
537                           struct beiscsi_hba *phba,
538                           unsigned short cid,
539                           struct pdu_base *ppdu,
540                           unsigned long pdu_len,
541                           void *pbuffer, unsigned long buf_len)
542 {
543         struct iscsi_conn *conn = beiscsi_conn->conn;
544         struct iscsi_session *session = conn->session;
545         struct iscsi_task *task;
546         struct beiscsi_io_task *io_task;
547         struct iscsi_hdr *login_hdr;
548
549         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
550                                                 PDUBASE_OPCODE_MASK) {
551         case ISCSI_OP_NOOP_IN:
552                 pbuffer = NULL;
553                 buf_len = 0;
554                 break;
555         case ISCSI_OP_ASYNC_EVENT:
556                 break;
557         case ISCSI_OP_REJECT:
558                 WARN_ON(!pbuffer);
559                 WARN_ON(!(buf_len == 48));
560                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
561                 break;
562         case ISCSI_OP_LOGIN_RSP:
563         case ISCSI_OP_TEXT_RSP:
564                 task = conn->login_task;
565                 io_task = task->dd_data;
566                 login_hdr = (struct iscsi_hdr *)ppdu;
567                 login_hdr->itt = io_task->libiscsi_itt;
568                 break;
569         default:
570                 shost_printk(KERN_WARNING, phba->shost,
571                              "Unrecognized opcode 0x%x in async msg \n",
572                              (ppdu->
573                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
574                                                 & PDUBASE_OPCODE_MASK));
575                 return 1;
576         }
577
578         spin_lock_bh(&session->lock);
579         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
580         spin_unlock_bh(&session->lock);
581         return 0;
582 }
583
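/*
 * I/O SGL handles are managed as a circular pool: alloc takes the entry at
 * io_sgl_alloc_index and advances it, free returns the handle at
 * io_sgl_free_index; both indices wrap at ios_per_ctrl.
 */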
584 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
585 {
586         struct sgl_handle *psgl_handle;
587
588         if (phba->io_sgl_hndl_avbl) {
589                 SE_DEBUG(DBG_LVL_8,
590                          "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
591                          phba->io_sgl_alloc_index);
592                 psgl_handle = phba->io_sgl_hndl_base[phba->
593                                                 io_sgl_alloc_index];
594                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
595                 phba->io_sgl_hndl_avbl--;
596                 if (phba->io_sgl_alloc_index == (phba->params.
597                                                  ios_per_ctrl - 1))
598                         phba->io_sgl_alloc_index = 0;
599                 else
600                         phba->io_sgl_alloc_index++;
601         } else
602                 psgl_handle = NULL;
603         return psgl_handle;
604 }
605
606 static void
607 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
608 {
609         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
610                  phba->io_sgl_free_index);
611         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
612                 /*
613                  * this can happen if clean_task is called on a task that
614                  * failed in xmit_task or alloc_pdu.
615                  */
616                  SE_DEBUG(DBG_LVL_8,
617                          "Double Free in IO SGL io_sgl_free_index=%d, "
618                          "value there=%p\n", phba->io_sgl_free_index,
619                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
620                 return;
621         }
622         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
623         phba->io_sgl_hndl_avbl++;
624         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
625                 phba->io_sgl_free_index = 0;
626         else
627                 phba->io_sgl_free_index++;
628 }
629
630 /**
631  * alloc_wrb_handle - To allocate a wrb handle
632  * @phba: The hba pointer
633  * @cid: The cid to use for allocation
634  *
635  * This happens under session_lock until submission to chip
636  */
637 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
638 {
639         struct hwi_wrb_context *pwrb_context;
640         struct hwi_controller *phwi_ctrlr;
641         struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
642
643         phwi_ctrlr = phba->phwi_ctrlr;
644         pwrb_context = &phwi_ctrlr->wrb_context[cid];
645         if (pwrb_context->wrb_handles_available >= 2) {
646                 pwrb_handle = pwrb_context->pwrb_handle_base[
647                                             pwrb_context->alloc_index];
648                 pwrb_context->wrb_handles_available--;
649                 if (pwrb_context->alloc_index ==
650                                                 (phba->params.wrbs_per_cxn - 1))
651                         pwrb_context->alloc_index = 0;
652                 else
653                         pwrb_context->alloc_index++;
654                 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
655                                                 pwrb_context->alloc_index];
656                 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
657         } else
658                 pwrb_handle = NULL;
659         return pwrb_handle;
660 }
661
662 /**
663  * free_wrb_handle - To free the wrb handle back to pool
664  * @phba: The hba pointer
665  * @pwrb_context: The context to free from
666  * @pwrb_handle: The wrb_handle to free
667  *
668  * This happens under session_lock until submission to chip
669  */
670 static void
671 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
672                 struct wrb_handle *pwrb_handle)
673 {
674         if (!ring_mode)
675                 pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
676                                                pwrb_handle;
677         pwrb_context->wrb_handles_available++;
678         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
679                 pwrb_context->free_index = 0;
680         else
681                 pwrb_context->free_index++;
682
683         SE_DEBUG(DBG_LVL_8,
684                  "FREE WRB: pwrb_handle=%p free_index=0x%x "
685                  "wrb_handles_available=%d\n",
686                  pwrb_handle, pwrb_context->free_index,
687                  pwrb_context->wrb_handles_available);
688 }
689
690 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
691 {
692         struct sgl_handle *psgl_handle;
693
694         if (phba->eh_sgl_hndl_avbl) {
695                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
696                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
697                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
698                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
699                 phba->eh_sgl_hndl_avbl--;
700                 if (phba->eh_sgl_alloc_index ==
701                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
702                      1))
703                         phba->eh_sgl_alloc_index = 0;
704                 else
705                         phba->eh_sgl_alloc_index++;
706         } else
707                 psgl_handle = NULL;
708         return psgl_handle;
709 }
710
711 void
712 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
713 {
714
715         SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
716                              phba->eh_sgl_free_index);
717         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
718                 /*
719                  * this can happen if clean_task is called on a task that
720                  * failed in xmit_task or alloc_pdu.
721                  */
722                 SE_DEBUG(DBG_LVL_8,
723                          "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
724                          phba->eh_sgl_free_index);
725                 return;
726         }
727         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
728         phba->eh_sgl_hndl_avbl++;
729         if (phba->eh_sgl_free_index ==
730             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
731                 phba->eh_sgl_free_index = 0;
732         else
733                 phba->eh_sgl_free_index++;
734 }
735
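/*
 * Complete a SCSI command from a solicited CQE: extract ExpCmdSN/MaxCmdSN,
 * the iSCSI response and SCSI status, apply residual under/overflow handling,
 * copy sense data on CHECK CONDITION, then unmap the buffers and finish the
 * task via iscsi_complete_scsi_task().
 */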
736 static void
737 be_complete_io(struct beiscsi_conn *beiscsi_conn,
738                struct iscsi_task *task, struct sol_cqe *psol)
739 {
740         struct beiscsi_io_task *io_task = task->dd_data;
741         struct be_status_bhs *sts_bhs =
742                                 (struct be_status_bhs *)io_task->cmd_bhs;
743         struct iscsi_conn *conn = beiscsi_conn->conn;
744         unsigned int sense_len;
745         unsigned char *sense;
746         u32 resid = 0, exp_cmdsn, max_cmdsn;
747         u8 rsp, status, flags;
748
749         exp_cmdsn = (psol->
750                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
751                         & SOL_EXP_CMD_SN_MASK);
752         max_cmdsn = ((psol->
753                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
754                         & SOL_EXP_CMD_SN_MASK) +
755                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
756                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
757         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
758                                                 & SOL_RESP_MASK) >> 16);
759         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
760                                                 & SOL_STS_MASK) >> 8);
761         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
762                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
763
764         task->sc->result = (DID_OK << 16) | status;
765         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
766                 task->sc->result = DID_ERROR << 16;
767                 goto unmap;
768         }
769
770         /* bidi not initially supported */
771         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
772                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
773                                 32] & SOL_RES_CNT_MASK);
774
775                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
776                         task->sc->result = DID_ERROR << 16;
777
778                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
779                         scsi_set_resid(task->sc, resid);
780                         if (!status && (scsi_bufflen(task->sc) - resid <
781                             task->sc->underflow))
782                                 task->sc->result = DID_ERROR << 16;
783                 }
784         }
785
786         if (status == SAM_STAT_CHECK_CONDITION) {
787                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
788                 sense = sts_bhs->sense_info + sizeof(unsigned short);
789                 sense_len =  cpu_to_be16(*slen);
790                 memcpy(task->sc->sense_buffer, sense,
791                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
792         }
793
794         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
795                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
796                                                         & SOL_RES_CNT_MASK)
797                          conn->rxdata_octets += (psol->
798                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
799                              & SOL_RES_CNT_MASK);
800         }
801 unmap:
802         scsi_dma_unmap(io_task->scsi_cmnd);
803         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
804 }
805
806 static void
807 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
808                    struct iscsi_task *task, struct sol_cqe *psol)
809 {
810         struct iscsi_logout_rsp *hdr;
811         struct beiscsi_io_task *io_task = task->dd_data;
812         struct iscsi_conn *conn = beiscsi_conn->conn;
813
814         hdr = (struct iscsi_logout_rsp *)task->hdr;
815         hdr->opcode = ISCSI_OP_LOGOUT_RSP;
816         hdr->t2wait = 5;
817         hdr->t2retain = 0;
818         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
819                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
820         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
821                                         32] & SOL_RESP_MASK);
822         hdr->exp_cmdsn = cpu_to_be32(psol->
823                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
824                                         & SOL_EXP_CMD_SN_MASK);
825         hdr->max_cmdsn = be32_to_cpu((psol->
826                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
827                                         & SOL_EXP_CMD_SN_MASK) +
828                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
829                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
830         hdr->dlength[0] = 0;
831         hdr->dlength[1] = 0;
832         hdr->dlength[2] = 0;
833         hdr->hlength = 0;
834         hdr->itt = io_task->libiscsi_itt;
835         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
836 }
837
838 static void
839 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
840                 struct iscsi_task *task, struct sol_cqe *psol)
841 {
842         struct iscsi_tm_rsp *hdr;
843         struct iscsi_conn *conn = beiscsi_conn->conn;
844         struct beiscsi_io_task *io_task = task->dd_data;
845
846         hdr = (struct iscsi_tm_rsp *)task->hdr;
847         hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
848         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
849                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
850         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
851                                         32] & SOL_RESP_MASK);
852         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
853                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
854         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
855                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
856                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
857                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
858         hdr->itt = io_task->libiscsi_itt;
859         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
860 }
861
862 static void
863 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
864                        struct beiscsi_hba *phba, struct sol_cqe *psol)
865 {
866         struct hwi_wrb_context *pwrb_context;
867         struct wrb_handle *pwrb_handle = NULL;
868         struct sgl_handle *psgl_handle = NULL;
869         struct hwi_controller *phwi_ctrlr;
870         struct iscsi_task *task;
871         struct beiscsi_io_task *io_task;
872         struct iscsi_conn *conn = beiscsi_conn->conn;
873         struct iscsi_session *session = conn->session;
874
875         phwi_ctrlr = phba->phwi_ctrlr;
876         if (ring_mode) {
877                 psgl_handle = phba->sgl_hndl_array[((psol->
878                               dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
879                                 32] & SOL_ICD_INDEX_MASK) >> 6)];
880                 pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
881                 task = psgl_handle->task;
882                 pwrb_handle = NULL;
883         } else {
884                 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
885                                 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
886                                 SOL_CID_MASK) >> 6) -
887                                 phba->fw_config.iscsi_cid_start];
888                 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
889                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
890                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
891                 task = pwrb_handle->pio_handle;
892         }
893
894         io_task = task->dd_data;
895         spin_lock(&phba->mgmt_sgl_lock);
896         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
897         spin_unlock(&phba->mgmt_sgl_lock);
898         spin_lock_bh(&session->lock);
899         free_wrb_handle(phba, pwrb_context, pwrb_handle);
900         spin_unlock_bh(&session->lock);
901 }
902
903 static void
904 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
905                        struct iscsi_task *task, struct sol_cqe *psol)
906 {
907         struct iscsi_nopin *hdr;
908         struct iscsi_conn *conn = beiscsi_conn->conn;
909         struct beiscsi_io_task *io_task = task->dd_data;
910
911         hdr = (struct iscsi_nopin *)task->hdr;
912         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
913                         & SOL_FLAGS_MASK) >> 24) | 0x80;
914         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
915                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
916         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
917                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
918                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
919                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
920         hdr->opcode = ISCSI_OP_NOOP_IN;
921         hdr->itt = io_task->libiscsi_itt;
922         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
923 }
924
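/*
 * Dispatch a solicited CQE: recover the iscsi_task either from the SGL handle
 * (ring mode) or from the WRB handle indexed by the CQE, determine the WRB
 * type (from the SGL handle in ring mode, otherwise from the WRB itself), and
 * route it to the matching completion helper under the session lock.
 */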
925 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
926                              struct beiscsi_hba *phba, struct sol_cqe *psol)
927 {
928         struct hwi_wrb_context *pwrb_context;
929         struct wrb_handle *pwrb_handle;
930         struct iscsi_wrb *pwrb = NULL;
931         struct hwi_controller *phwi_ctrlr;
932         struct iscsi_task *task;
933         struct sgl_handle *psgl_handle = NULL;
934         unsigned int type;
935         struct iscsi_conn *conn = beiscsi_conn->conn;
936         struct iscsi_session *session = conn->session;
937
938         phwi_ctrlr = phba->phwi_ctrlr;
939         if (ring_mode) {
940                 psgl_handle = phba->sgl_hndl_array[((psol->
941                               dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
942                               32] & SOL_ICD_INDEX_MASK) >> 6)];
943                 task = psgl_handle->task;
944                 type = psgl_handle->type;
945         } else {
946                 pwrb_context = &phwi_ctrlr->
947                                 wrb_context[((psol->dw[offsetof
948                                 (struct amap_sol_cqe, cid) / 32]
949                                 & SOL_CID_MASK) >> 6) -
950                                 phba->fw_config.iscsi_cid_start];
951                 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
952                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
953                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
954                 task = pwrb_handle->pio_handle;
955                 pwrb = pwrb_handle->pwrb;
956                 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
957                          WRB_TYPE_MASK) >> 28;
958         }
959         spin_lock_bh(&session->lock);
960         switch (type) {
961         case HWH_TYPE_IO:
962         case HWH_TYPE_IO_RD:
963                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
964                     ISCSI_OP_NOOP_OUT) {
965                         be_complete_nopin_resp(beiscsi_conn, task, psol);
966                 } else
967                         be_complete_io(beiscsi_conn, task, psol);
968                 break;
969
970         case HWH_TYPE_LOGOUT:
971                 be_complete_logout(beiscsi_conn, task, psol);
972                 break;
973
974         case HWH_TYPE_LOGIN:
975                 SE_DEBUG(DBG_LVL_1,
976                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
977                          "- Solicited path \n");
978                 break;
979
980         case HWH_TYPE_TMF:
981                 be_complete_tmf(beiscsi_conn, task, psol);
982                 break;
983
984         case HWH_TYPE_NOP:
985                 be_complete_nopin_resp(beiscsi_conn, task, psol);
986                 break;
987
988         default:
989                 if (ring_mode)
990                         shost_printk(KERN_WARNING, phba->shost,
991                                 "In hwi_complete_cmd, unknown type = %d "
992                                 "icd_index 0x%x CID 0x%x\n", type,
993                                 ((psol->dw[offsetof(struct amap_sol_cqe_ring,
994                                 icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
995                                 psgl_handle->cid);
996                 else
997                         shost_printk(KERN_WARNING, phba->shost,
998                                 "In hwi_complete_cmd, unknown type = %d "
999                                 "wrb_index 0x%x CID 0x%x\n", type,
1000                                 ((psol->dw[offsetof(struct amap_iscsi_wrb,
1001                                 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1002                                 ((psol->dw[offsetof(struct amap_sol_cqe,
1003                                 cid) / 32] & SOL_CID_MASK) >> 6));
1004                 break;
1005         }
1006
1007         spin_unlock_bh(&session->lock);
1008 }
1009
1010 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1011                                           *pasync_ctx, unsigned int is_header,
1012                                           unsigned int host_write_ptr)
1013 {
1014         if (is_header)
1015                 return &pasync_ctx->async_entry[host_write_ptr].
1016                     header_busy_list;
1017         else
1018                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1019 }
1020
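/*
 * Translate a default-PDU completion into the async_pdu_handle that owns the
 * posted buffer: reconstruct the buffer's physical address from the CQE,
 * convert it to an index within the header or data pool, and look that index
 * up on the per-entry busy list.
 */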
1021 static struct async_pdu_handle *
1022 hwi_get_async_handle(struct beiscsi_hba *phba,
1023                      struct beiscsi_conn *beiscsi_conn,
1024                      struct hwi_async_pdu_context *pasync_ctx,
1025                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1026 {
1027         struct be_bus_address phys_addr;
1028         struct list_head *pbusy_list;
1029         struct async_pdu_handle *pasync_handle = NULL;
1030         int buffer_len = 0;
1031         unsigned char buffer_index = -1;
1032         unsigned char is_header = 0;
1033
1034         phys_addr.u.a32.address_lo =
1035             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1036             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1037                                                 & PDUCQE_DPL_MASK) >> 16);
1038         phys_addr.u.a32.address_hi =
1039             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1040
1041         phys_addr.u.a64.address =
1042                         *((unsigned long long *)(&phys_addr.u.a64.address));
1043
1044         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1045                         & PDUCQE_CODE_MASK) {
1046         case UNSOL_HDR_NOTIFY:
1047                 is_header = 1;
1048
1049                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1050                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1051                         index) / 32] & PDUCQE_INDEX_MASK));
1052
1053                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1054                                 pasync_ctx->async_header.pa_base.u.a64.address);
1055
1056                 buffer_index = buffer_len /
1057                                 pasync_ctx->async_header.buffer_size;
1058
1059                 break;
1060         case UNSOL_DATA_NOTIFY:
1061                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1062                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1063                                         index) / 32] & PDUCQE_INDEX_MASK));
1064                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1065                                         pasync_ctx->async_data.pa_base.u.
1066                                         a64.address);
1067                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1068                 break;
1069         default:
1070                 pbusy_list = NULL;
1071                 shost_printk(KERN_WARNING, phba->shost,
1072                         "Unexpected code=%d \n",
1073                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1074                                         code) / 32] & PDUCQE_CODE_MASK);
1075                 return NULL;
1076         }
1077
1078         WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
1079         WARN_ON(list_empty(pbusy_list));
1080         list_for_each_entry(pasync_handle, pbusy_list, link) {
1081                 WARN_ON(pasync_handle->consumed);
1082                 if (pasync_handle->index == buffer_index)
1083                         break;
1084         }
1085
1086         WARN_ON(!pasync_handle);
1087
1088         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1089                                              phba->fw_config.iscsi_cid_start;
1090         pasync_handle->is_header = is_header;
1091         pasync_handle->buffer_len = ((pdpdu_cqe->
1092                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1093                         & PDUCQE_DPL_MASK) >> 16);
1094
1095         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1096                         index) / 32] & PDUCQE_INDEX_MASK);
1097         return pasync_handle;
1098 }
1099
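/*
 * Walk the endpoint read pointer forward to the CQ index reported by the
 * adapter, marking each busy entry passed on the way as consumed and
 * crediting the ring with that many newly writable slots.
 */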
1100 static unsigned int
1101 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1102                            unsigned int is_header, unsigned int cq_index)
1103 {
1104         struct list_head *pbusy_list;
1105         struct async_pdu_handle *pasync_handle;
1106         unsigned int num_entries, writables = 0;
1107         unsigned int *pep_read_ptr, *pwritables;
1108
1109
1110         if (is_header) {
1111                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1112                 pwritables = &pasync_ctx->async_header.writables;
1113                 num_entries = pasync_ctx->async_header.num_entries;
1114         } else {
1115                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1116                 pwritables = &pasync_ctx->async_data.writables;
1117                 num_entries = pasync_ctx->async_data.num_entries;
1118         }
1119
1120         while ((*pep_read_ptr) != cq_index) {
1121                 (*pep_read_ptr)++;
1122                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1123
1124                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1125                                                      *pep_read_ptr);
1126                 if (writables == 0)
1127                         WARN_ON(list_empty(pbusy_list));
1128
1129                 if (!list_empty(pbusy_list)) {
1130                         pasync_handle = list_entry(pbusy_list->next,
1131                                                    struct async_pdu_handle,
1132                                                    link);
1133                         WARN_ON(!pasync_handle);
1134                         pasync_handle->consumed = 1;
1135                 }
1136
1137                 writables++;
1138         }
1139
1140         if (!writables) {
1141                 SE_DEBUG(DBG_LVL_1,
1142                          "Duplicate notification received - index 0x%x!!\n",
1143                          cq_index);
1144                 WARN_ON(1);
1145         }
1146
1147         *pwritables = *pwritables + writables;
1148         return 0;
1149 }
1150
1151 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1152                                        unsigned int cri)
1153 {
1154         struct hwi_controller *phwi_ctrlr;
1155         struct hwi_async_pdu_context *pasync_ctx;
1156         struct async_pdu_handle *pasync_handle, *tmp_handle;
1157         struct list_head *plist;
1158         unsigned int i = 0;
1159
1160         phwi_ctrlr = phba->phwi_ctrlr;
1161         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1162
1163         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1164
1165         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1166                 list_del(&pasync_handle->link);
1167
1168                 if (i == 0) {
1169                         list_add_tail(&pasync_handle->link,
1170                                       &pasync_ctx->async_header.free_list);
1171                         pasync_ctx->async_header.free_entries++;
1172                         i++;
1173                 } else {
1174                         list_add_tail(&pasync_handle->link,
1175                                       &pasync_ctx->async_data.free_list);
1176                         pasync_ctx->async_data.free_entries++;
1177                         i++;
1178                 }
1179         }
1180
1181         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1182         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1183         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1184         return 0;
1185 }
1186
1187 static struct phys_addr *
1188 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1189                      unsigned int is_header, unsigned int host_write_ptr)
1190 {
1191         struct phys_addr *pasync_sge = NULL;
1192
1193         if (is_header)
1194                 pasync_sge = pasync_ctx->async_header.ring_base;
1195         else
1196                 pasync_sge = pasync_ctx->async_data.ring_base;
1197
1198         return pasync_sge + host_write_ptr;
1199 }
1200
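/*
 * Replenish the default PDU header or data ring: move free handles onto the
 * busy lists, write their addresses into the ring SGEs, and ring the RXULP
 * doorbell. Buffers are posted in multiples of 8 only.
 */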
1201 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1202                                    unsigned int is_header)
1203 {
1204         struct hwi_controller *phwi_ctrlr;
1205         struct hwi_async_pdu_context *pasync_ctx;
1206         struct async_pdu_handle *pasync_handle;
1207         struct list_head *pfree_link, *pbusy_list;
1208         struct phys_addr *pasync_sge;
1209         unsigned int ring_id, num_entries;
1210         unsigned int host_write_num;
1211         unsigned int writables;
1212         unsigned int i = 0;
1213         u32 doorbell = 0;
1214
1215         phwi_ctrlr = phba->phwi_ctrlr;
1216         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1217
1218         if (is_header) {
1219                 num_entries = pasync_ctx->async_header.num_entries;
1220                 writables = min(pasync_ctx->async_header.writables,
1221                                 pasync_ctx->async_header.free_entries);
1222                 pfree_link = pasync_ctx->async_header.free_list.next;
1223                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1224                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1225         } else {
1226                 num_entries = pasync_ctx->async_data.num_entries;
1227                 writables = min(pasync_ctx->async_data.writables,
1228                                 pasync_ctx->async_data.free_entries);
1229                 pfree_link = pasync_ctx->async_data.free_list.next;
1230                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1231                 ring_id = phwi_ctrlr->default_pdu_data.id;
1232         }
1233
1234         writables = (writables / 8) * 8;
1235         if (writables) {
1236                 for (i = 0; i < writables; i++) {
1237                         pbusy_list =
1238                             hwi_get_async_busy_list(pasync_ctx, is_header,
1239                                                     host_write_num);
1240                         pasync_handle =
1241                             list_entry(pfree_link, struct async_pdu_handle,
1242                                                                 link);
1243                         WARN_ON(!pasync_handle);
1244                         pasync_handle->consumed = 0;
1245
1246                         pfree_link = pfree_link->next;
1247
1248                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1249                                                 is_header, host_write_num);
1250
1251                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1252                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1253
1254                         list_move(&pasync_handle->link, pbusy_list);
1255
1256                         host_write_num++;
1257                         host_write_num = host_write_num % num_entries;
1258                 }
1259
1260                 if (is_header) {
1261                         pasync_ctx->async_header.host_write_ptr =
1262                                                         host_write_num;
1263                         pasync_ctx->async_header.free_entries -= writables;
1264                         pasync_ctx->async_header.writables -= writables;
1265                         pasync_ctx->async_header.busy_entries += writables;
1266                 } else {
1267                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1268                         pasync_ctx->async_data.free_entries -= writables;
1269                         pasync_ctx->async_data.writables -= writables;
1270                         pasync_ctx->async_data.busy_entries += writables;
1271                 }
1272
1273                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1274                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1275                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1276                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1277                                         << DB_DEF_PDU_CQPROC_SHIFT;
1278
1279                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1280         }
1281 }
1282
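     /*
      * hwi_flush_default_pdu_buffer - drop a default PDU data buffer that
      * the hardware has flagged as bad (e.g. on a data digest error).  The
      * handle is looked up from the CQE, the writable count is updated if
      * the entry was not yet consumed, and the buffers are freed before
      * the default PDU ring is replenished.
      */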
1283 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1284                                          struct beiscsi_conn *beiscsi_conn,
1285                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1286 {
1287         struct hwi_controller *phwi_ctrlr;
1288         struct hwi_async_pdu_context *pasync_ctx;
1289         struct async_pdu_handle *pasync_handle = NULL;
1290         unsigned int cq_index = -1;
1291
1292         phwi_ctrlr = phba->phwi_ctrlr;
1293         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1294
1295         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1296                                              pdpdu_cqe, &cq_index);
1297         BUG_ON(pasync_handle->is_header != 0);
1298         if (pasync_handle->consumed == 0)
1299                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1300                                            cq_index);
1301
1302         hwi_free_async_msg(phba, pasync_handle->cri);
1303         hwi_post_async_buffers(phba, pasync_handle->is_header);
1304 }
1305
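     /*
      * hwi_fwd_async_msg - hand a fully received unsolicited PDU up to
      * libiscsi.  The first handle on the per-CRI wait queue holds the
      * header; any further handles hold data segments, which are copied
      * back-to-back into the first data buffer before
      * beiscsi_process_async_pdu() is called.
      */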
1306 static unsigned int
1307 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1308                   struct beiscsi_hba *phba,
1309                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1310 {
1311         struct list_head *plist;
1312         struct async_pdu_handle *pasync_handle;
1313         void *phdr = NULL;
1314         unsigned int hdr_len = 0, buf_len = 0;
1315         unsigned int status, index = 0, offset = 0;
1316         void *pfirst_buffer = NULL;
1317         unsigned int num_buf = 0;
1318
1319         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1320
1321         list_for_each_entry(pasync_handle, plist, link) {
1322                 if (index == 0) {
1323                         phdr = pasync_handle->pbuffer;
1324                         hdr_len = pasync_handle->buffer_len;
1325                 } else {
1326                         buf_len = pasync_handle->buffer_len;
1327                         if (!num_buf) {
1328                                 pfirst_buffer = pasync_handle->pbuffer;
1329                                 num_buf++;
1330                         }
1331                         memcpy(pfirst_buffer + offset,
1332                                pasync_handle->pbuffer, buf_len);
1333                         offset += buf_len;
1334                 }
1335                 index++;
1336         }
1337
1338         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1339                                            (beiscsi_conn->beiscsi_conn_cid -
1340                                             phba->fw_config.iscsi_cid_start),
1341                                             phdr, hdr_len, pfirst_buffer,
1342                                             buf_len);
1343
1344         if (status == 0)
1345                 hwi_free_async_msg(phba, cri);
1346         return 0;
1347 }
1348
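     /*
      * hwi_gather_async_pdu - account for one header or data completion on
      * the per-CRI wait queue.  A header entry records how many payload
      * bytes are still expected; once the queued data entries cover that
      * amount the assembled PDU is forwarded via hwi_fwd_async_msg().
      */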
1349 static unsigned int
1350 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1351                      struct beiscsi_hba *phba,
1352                      struct async_pdu_handle *pasync_handle)
1353 {
1354         struct hwi_async_pdu_context *pasync_ctx;
1355         struct hwi_controller *phwi_ctrlr;
1356         unsigned int bytes_needed = 0, status = 0;
1357         unsigned short cri = pasync_handle->cri;
1358         struct pdu_base *ppdu;
1359
1360         phwi_ctrlr = phba->phwi_ctrlr;
1361         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1362
1363         list_del(&pasync_handle->link);
1364         if (pasync_handle->is_header) {
1365                 pasync_ctx->async_header.busy_entries--;
1366                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1367                         hwi_free_async_msg(phba, cri);
1368                         BUG();
1369                 }
1370
1371                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1372                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1373                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1374                                 (unsigned short)pasync_handle->buffer_len;
1375                 list_add_tail(&pasync_handle->link,
1376                               &pasync_ctx->async_entry[cri].wait_queue.list);
1377
1378                 ppdu = pasync_handle->pbuffer;
1379                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1380                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1381                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1382                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1383                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1384
1385                 if (status == 0) {
1386                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1387                             bytes_needed;
1388
1389                         if (bytes_needed == 0)
1390                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1391                                                            pasync_ctx, cri);
1392                 }
1393         } else {
1394                 pasync_ctx->async_data.busy_entries--;
1395                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1396                         list_add_tail(&pasync_handle->link,
1397                                       &pasync_ctx->async_entry[cri].wait_queue.
1398                                       list);
1399                         pasync_ctx->async_entry[cri].wait_queue.
1400                                 bytes_received +=
1401                                 (unsigned short)pasync_handle->buffer_len;
1402
1403                         if (pasync_ctx->async_entry[cri].wait_queue.
1404                             bytes_received >=
1405                             pasync_ctx->async_entry[cri].wait_queue.
1406                             bytes_needed)
1407                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1408                                                            pasync_ctx, cri);
1409                 }
1410         }
1411         return status;
1412 }
1413
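     /*
      * hwi_process_default_pdu_ring - handle an UNSOL_HDR/UNSOL_DATA
      * completion from the default PDU ring: update the writable counts if
      * the entry was not yet consumed, gather the PDU fragments and repost
      * buffers to the ring.
      */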
1414 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1415                                          struct beiscsi_hba *phba,
1416                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1417 {
1418         struct hwi_controller *phwi_ctrlr;
1419         struct hwi_async_pdu_context *pasync_ctx;
1420         struct async_pdu_handle *pasync_handle = NULL;
1421         unsigned int cq_index = -1;
1422
1423         phwi_ctrlr = phba->phwi_ctrlr;
1424         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1425         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1426                                              pdpdu_cqe, &cq_index);
1427
1428         if (pasync_handle->consumed == 0)
1429                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1430                                            cq_index);
1431         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1432         hwi_post_async_buffers(phba, pasync_handle->is_header);
1433 }
1434
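     /*
      * beiscsi_process_mcc_isr - drain the MCC completion queue.  Async
      * entries (currently only link state events) are dispatched separately
      * from ordinary command completions, and the CQ doorbell is rung every
      * 32 entries and once more, with rearm, when the loop finishes.
      */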
1435 static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1436 {
1437         struct be_queue_info *mcc_cq;
1438         struct  be_mcc_compl *mcc_compl;
1439         unsigned int num_processed = 0;
1440
1441         mcc_cq = &phba->ctrl.mcc_obj.cq;
1442         mcc_compl = queue_tail_node(mcc_cq);
1443         mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1444         while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1445
1446                 if (num_processed >= 32) {
1447                         hwi_ring_cq_db(phba, mcc_cq->id,
1448                                         num_processed, 0, 0);
1449                         num_processed = 0;
1450                 }
1451                 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1452                         /* Interpret flags as an async trailer */
1453                         if (is_link_state_evt(mcc_compl->flags))
1454                                 /* Interpret compl as an async link evt */
1455                                 beiscsi_async_link_state_process(phba,
1456                                 (struct be_async_event_link_state *) mcc_compl);
1457                         else
1458                                 SE_DEBUG(DBG_LVL_1,
1459                                         " Unsupported Async Event, flags"
1460                                         " = 0x%08x \n", mcc_compl->flags);
1461                 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1462                         be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1463                         atomic_dec(&phba->ctrl.mcc_obj.q.used);
1464                 }
1465
1466                 mcc_compl->flags = 0;
1467                 queue_tail_inc(mcc_cq);
1468                 mcc_compl = queue_tail_node(mcc_cq);
1469                 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1470                 num_processed++;
1471         }
1472
1473         if (num_processed > 0)
1474                 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1475
1476 }
1477
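     /*
      * beiscsi_process_cq - service one I/O completion queue.  Each valid
      * CQE is mapped back to its connection (via the SGL handle in
      * ring_mode, otherwise via the CID in the CQE) and dispatched on its
      * completion code.  Returns the total number of entries processed so
      * the iopoll caller can compare it against its budget.
      */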
1478 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1479 {
1480         struct be_queue_info *cq;
1481         struct sol_cqe *sol;
1482         struct dmsg_cqe *dmsg;
1483         unsigned int num_processed = 0;
1484         unsigned int tot_nump = 0;
1485         struct beiscsi_conn *beiscsi_conn;
1486         struct sgl_handle *psgl_handle = NULL;
1487         struct beiscsi_endpoint *beiscsi_ep;
1488         struct iscsi_endpoint *ep;
1489         struct beiscsi_hba *phba;
1490
1491         cq = pbe_eq->cq;
1492         sol = queue_tail_node(cq);
1493         phba = pbe_eq->phba;
1494
1495         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1496                CQE_VALID_MASK) {
1497                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1498
1499                 if (ring_mode) {
1500                         psgl_handle = phba->sgl_hndl_array[((sol->
1501                                       dw[offsetof(struct amap_sol_cqe_ring,
1502                                       icd_index) / 32] & SOL_ICD_INDEX_MASK)
1503                                       >> 6)];
1504                         ep = phba->ep_array[psgl_handle->cid];
1505                 } else {
1506                         ep = phba->ep_array[(u32) ((sol->
1507                                    dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1508                                    SOL_CID_MASK) >> 6) -
1509                                    phba->fw_config.iscsi_cid_start];
1510                 }
1511                 beiscsi_ep = ep->dd_data;
1512                 beiscsi_conn = beiscsi_ep->conn;
1513
1514                 if (num_processed >= 32) {
1515                         hwi_ring_cq_db(phba, cq->id,
1516                                         num_processed, 0, 0);
1517                         tot_nump += num_processed;
1518                         num_processed = 0;
1519                 }
1520
1521                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1522                         32] & CQE_CODE_MASK) {
1523                 case SOL_CMD_COMPLETE:
1524                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1525                         break;
1526                 case DRIVERMSG_NOTIFY:
1527                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
1528                         dmsg = (struct dmsg_cqe *)sol;
1529                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1530                         break;
1531                 case UNSOL_HDR_NOTIFY:
1532                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1533                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1534                                              (struct i_t_dpdu_cqe *)sol);
1535                         break;
1536                 case UNSOL_DATA_NOTIFY:
1537                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1538                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1539                                              (struct i_t_dpdu_cqe *)sol);
1540                         break;
1541                 case CXN_INVALIDATE_INDEX_NOTIFY:
1542                 case CMD_INVALIDATED_NOTIFY:
1543                 case CXN_INVALIDATE_NOTIFY:
1544                         SE_DEBUG(DBG_LVL_1,
1545                                  "Ignoring CQ Error notification for cmd/cxn "
1546                                  "invalidate\n");
1547                         break;
1548                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1549                 case CMD_KILLED_INVALID_STATSN_RCVD:
1550                 case CMD_KILLED_INVALID_R2T_RCVD:
1551                 case CMD_CXN_KILLED_LUN_INVALID:
1552                 case CMD_CXN_KILLED_ICD_INVALID:
1553                 case CMD_CXN_KILLED_ITT_INVALID:
1554                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1555                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1556                         if (ring_mode) {
1557                                 SE_DEBUG(DBG_LVL_1,
1558                                  "CQ Error notification for cmd.. "
1559                                  "code %d cid 0x%x\n",
1560                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1561                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1562                         } else {
1563                                 SE_DEBUG(DBG_LVL_1,
1564                                  "CQ Error notification for cmd.. "
1565                                  "code %d cid 0x%x\n",
1566                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1567                                  32] & CQE_CODE_MASK,
1568                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1569                                  32] & SOL_CID_MASK));
1570                         }
1571                         break;
1572                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1573                         SE_DEBUG(DBG_LVL_1,
1574                                  "Digest error on def pdu ring, dropping..\n");
1575                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1576                                              (struct i_t_dpdu_cqe *) sol);
1577                         break;
1578                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1579                 case CXN_KILLED_BURST_LEN_MISMATCH:
1580                 case CXN_KILLED_AHS_RCVD:
1581                 case CXN_KILLED_HDR_DIGEST_ERR:
1582                 case CXN_KILLED_UNKNOWN_HDR:
1583                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1584                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1585                 case CXN_KILLED_TIMED_OUT:
1586                 case CXN_KILLED_FIN_RCVD:
1587                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1588                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1589                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1590                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1591                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1592                         if (ring_mode) {
1593                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1594                                  "0x%x...\n",
1595                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1596                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1597                         } else {
1598                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1599                                  "0x%x...\n",
1600                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1601                                  32] & CQE_CODE_MASK,
1602                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1603                                  32] & CQE_CID_MASK));
1604                         }
1605                         iscsi_conn_failure(beiscsi_conn->conn,
1606                                            ISCSI_ERR_CONN_FAILED);
1607                         break;
1608                 case CXN_KILLED_RST_SENT:
1609                 case CXN_KILLED_RST_RCVD:
1610                         if (ring_mode) {
1611                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1612                                 "received/sent on CID 0x%x...\n",
1613                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1614                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1615                         } else {
1616                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1617                                 "received/sent on CID 0x%x...\n",
1618                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1619                                  32] & CQE_CODE_MASK,
1620                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1621                                  32] & CQE_CID_MASK));
1622                         }
1623                         iscsi_conn_failure(beiscsi_conn->conn,
1624                                            ISCSI_ERR_CONN_FAILED);
1625                         break;
1626                 default:
1627                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1628                                  "received on CID 0x%x...\n",
1629                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1630                                  32] & CQE_CODE_MASK,
1631                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1632                                  32] & CQE_CID_MASK));
1633                         break;
1634                 }
1635
1636                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1637                 queue_tail_inc(cq);
1638                 sol = queue_tail_node(cq);
1639                 num_processed++;
1640         }
1641
1642         if (num_processed > 0) {
1643                 tot_nump += num_processed;
1644                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1645         }
1646         return tot_nump;
1647 }
1648
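     /*
      * beiscsi_process_all_cqs - work item that services whichever queues
      * the interrupt handler flagged in todo_mcc_cq/todo_cq.  With MSI-X
      * the MCC event queue is the last one (index num_cpus); otherwise
      * everything shares be_eq[0].
      */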
1649 void beiscsi_process_all_cqs(struct work_struct *work)
1650 {
1651         unsigned long flags;
1652         struct hwi_controller *phwi_ctrlr;
1653         struct hwi_context_memory *phwi_context;
1654         struct be_eq_obj *pbe_eq;
1655         struct beiscsi_hba *phba =
1656             container_of(work, struct beiscsi_hba, work_cqs);
1657
1658         phwi_ctrlr = phba->phwi_ctrlr;
1659         phwi_context = phwi_ctrlr->phwi_ctxt;
1660         if (phba->msix_enabled)
1661                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1662         else
1663                 pbe_eq = &phwi_context->be_eq[0];
1664
1665         if (phba->todo_mcc_cq) {
1666                 spin_lock_irqsave(&phba->isr_lock, flags);
1667                 phba->todo_mcc_cq = 0;
1668                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1669                 beiscsi_process_mcc_isr(phba);
1670         }
1671
1672         if (phba->todo_cq) {
1673                 spin_lock_irqsave(&phba->isr_lock, flags);
1674                 phba->todo_cq = 0;
1675                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1676                 beiscsi_process_cq(pbe_eq);
1677         }
1678 }
1679
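     /* blk_iopoll callback: poll the CQ and rearm the EQ once under budget. */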
1680 static int be_iopoll(struct blk_iopoll *iop, int budget)
1681 {
1682         unsigned int ret;
1683         struct beiscsi_hba *phba;
1684         struct be_eq_obj *pbe_eq;
1685
1686         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1687         ret = beiscsi_process_cq(pbe_eq);
1688         if (ret < budget) {
1689                 phba = pbe_eq->phba;
1690                 blk_iopoll_complete(iop);
1691                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1692                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1693         }
1694         return ret;
1695 }
1696
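     /*
      * hwi_write_sgl - build the WRB and SGL for a scatter-gather I/O task.
      * The first two fragments are also written inline into the WRB
      * (sge0/sge1); the SGL page then carries the BHS followed by every
      * data fragment with its running offset, with last_sge set on the
      * final entry.
      */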
1697 static void
1698 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1699               unsigned int num_sg, struct beiscsi_io_task *io_task)
1700 {
1701         struct iscsi_sge *psgl;
1702         unsigned short sg_len, index;
1703         unsigned int sge_len = 0;
1704         unsigned long long addr;
1705         struct scatterlist *l_sg;
1706         unsigned int offset;
1707
1708         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1709                                       io_task->bhs_pa.u.a32.address_lo);
1710         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1711                                       io_task->bhs_pa.u.a32.address_hi);
1712
1713         l_sg = sg;
1714         for (index = 0; (index < num_sg) && (index < 2); index++, sg = sg_next(sg)) {
1715                 if (index == 0) {
1716                         sg_len = sg_dma_len(sg);
1717                         addr = (u64) sg_dma_address(sg);
1718                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1719                                                         (addr & 0xFFFFFFFF));
1720                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1721                                                         (addr >> 32));
1722                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1723                                                         sg_len);
1724                         sge_len = sg_len;
1725                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1726                                                         1);
1727                 } else {
1728                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1729                                                         0);
1730                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1731                                                         pwrb, sge_len);
1732                         sg_len = sg_dma_len(sg);
1733                         addr = (u64) sg_dma_address(sg);
1734                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1735                                                         (addr & 0xFFFFFFFF));
1736                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1737                                                         (addr >> 32));
1738                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1739                                                         sg_len);
1740                 }
1741         }
1742         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1743         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1744
1745         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1746
1747         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1748                         io_task->bhs_pa.u.a32.address_hi);
1749         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1750                         io_task->bhs_pa.u.a32.address_lo);
1751
1752         if (num_sg == 2)
1753                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
1754         sg = l_sg;
1755         psgl++;
1756         psgl++;
1757         offset = 0;
1758         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1759                 sg_len = sg_dma_len(sg);
1760                 addr = (u64) sg_dma_address(sg);
1761                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1762                                                 (addr & 0xFFFFFFFF));
1763                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1764                                                 (addr >> 32));
1765                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1766                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1767                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1768                 offset += sg_len;
1769         }
1770         psgl--;
1771         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1772 }
1773
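     /*
      * hwi_write_buffer - build the WRB and SGL for a non-I/O task (login,
      * NOP-Out and similar) whose immediate data, if any, lives in
      * task->data and is DMA-mapped here.
      */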
1774 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1775 {
1776         struct iscsi_sge *psgl;
1777         unsigned long long addr;
1778         struct beiscsi_io_task *io_task = task->dd_data;
1779         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1780         struct beiscsi_hba *phba = beiscsi_conn->phba;
1781
1782         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1783         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1784                                 io_task->bhs_pa.u.a32.address_lo);
1785         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1786                                 io_task->bhs_pa.u.a32.address_hi);
1787
1788         if (task->data) {
1789                 if (task->data_count) {
1790                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1791                         addr = (u64) pci_map_single(phba->pcidev,
1792                                                     task->data,
1793                                                     task->data_count, PCI_DMA_TODEVICE);
1794                 } else {
1795                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1796                         addr = 0;
1797                 }
1798                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1799                                                 (addr & 0xFFFFFFFF));
1800                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1801                                                 (addr >> 32));
1802                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1803                                                 task->data_count);
1804
1805                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1806         } else {
1807                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1808                 addr = 0;
1809         }
1810
1811         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1812
1813         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1814
1815         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1816                       io_task->bhs_pa.u.a32.address_hi);
1817         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1818                       io_task->bhs_pa.u.a32.address_lo);
1819         if (task->data) {
1820                 psgl++;
1821                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1822                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1823                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1824                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1825                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1826                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1827
1828                 psgl++;
1829                 if (task->data) {
1830                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1831                                                 (addr & 0xFFFFFFFF));
1832                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1833                                                 (addr >> 32));
1834                 }
1835                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1836         }
1837         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1838 }
1839
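     /*
      * beiscsi_find_mem_req - work out, per memory region, how many bytes
      * the driver needs for WRBs, SGLs and the default PDU machinery.  The
      * sizes land in phba->mem_req[] and are allocated by
      * beiscsi_alloc_mem().
      */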
1840 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1841 {
1842         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1843         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1844         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1845
1846         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1847                                       sizeof(struct sol_cqe));
1848         num_async_pdu_buf_pages =
1849                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1850                                        phba->params.defpdu_hdr_sz);
1851         num_async_pdu_buf_sgl_pages =
1852                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1853                                        sizeof(struct phys_addr));
1854         num_async_pdu_data_pages =
1855                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1856                                        phba->params.defpdu_data_sz);
1857         num_async_pdu_data_sgl_pages =
1858                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1859                                        sizeof(struct phys_addr));
1860
1861         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1862
1863         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1864                                                  BE_ISCSI_PDU_HEADER_SIZE;
1865         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1866                                             sizeof(struct hwi_context_memory);
1867
1868
1869         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1870             * (phba->params.wrbs_per_cxn)
1871             * phba->params.cxns_per_ctrl;
1872         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1873                                  (phba->params.wrbs_per_cxn);
1874         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1875                                 phba->params.cxns_per_ctrl);
1876
1877         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1878                 phba->params.icds_per_ctrl;
1879         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1880                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1881
1882         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1883                 num_async_pdu_buf_pages * PAGE_SIZE;
1884         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1885                 num_async_pdu_data_pages * PAGE_SIZE;
1886         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1887                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1888         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1889                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1890         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1891                 phba->params.asyncpdus_per_ctrl *
1892                 sizeof(struct async_pdu_handle);
1893         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1894                 phba->params.asyncpdus_per_ctrl *
1895                 sizeof(struct async_pdu_handle);
1896         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1897                 sizeof(struct hwi_async_pdu_context) +
1898                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1899 }
1900
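     /*
      * beiscsi_alloc_mem - allocate every region listed in phba->mem_req[]
      * as DMA-consistent memory.  A region may be split across up to
      * BEISCSI_MAX_FRAGS_INIT fragments; when an allocation fails it is
      * retried with the size rounded down to a power of two (or halved)
      * until it would drop below BE_MIN_MEM_SIZE.
      */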
1901 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1902 {
1903         struct be_mem_descriptor *mem_descr;
1904         dma_addr_t bus_add;
1905         struct mem_array *mem_arr, *mem_arr_orig;
1906         unsigned int i, j, alloc_size, curr_alloc_size;
1907
1908         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1909         if (!phba->phwi_ctrlr)
1910                 return -ENOMEM;
1911
1912         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1913                                  GFP_KERNEL);
1914         if (!phba->init_mem) {
1915                 kfree(phba->phwi_ctrlr);
1916                 return -ENOMEM;
1917         }
1918
1919         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1920                                GFP_KERNEL);
1921         if (!mem_arr_orig) {
1922                 kfree(phba->init_mem);
1923                 kfree(phba->phwi_ctrlr);
1924                 return -ENOMEM;
1925         }
1926
1927         mem_descr = phba->init_mem;
1928         for (i = 0; i < SE_MEM_MAX; i++) {
1929                 j = 0;
1930                 mem_arr = mem_arr_orig;
1931                 alloc_size = phba->mem_req[i];
1932                 memset(mem_arr, 0, sizeof(struct mem_array) *
1933                        BEISCSI_MAX_FRAGS_INIT);
1934                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
1935                 do {
1936                         mem_arr->virtual_address = pci_alloc_consistent(
1937                                                         phba->pcidev,
1938                                                         curr_alloc_size,
1939                                                         &bus_add);
1940                         if (!mem_arr->virtual_address) {
1941                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
1942                                         goto free_mem;
1943                                 if (curr_alloc_size -
1944                                         rounddown_pow_of_two(curr_alloc_size))
1945                                         curr_alloc_size = rounddown_pow_of_two
1946                                                              (curr_alloc_size);
1947                                 else
1948                                         curr_alloc_size = curr_alloc_size / 2;
1949                         } else {
1950                                 mem_arr->bus_address.u.
1951                                     a64.address = (__u64) bus_add;
1952                                 mem_arr->size = curr_alloc_size;
1953                                 alloc_size -= curr_alloc_size;
1954                                 curr_alloc_size = min(be_max_phys_size *
1955                                                       1024, alloc_size);
1956                                 j++;
1957                                 mem_arr++;
1958                         }
1959                 } while (alloc_size);
1960                 mem_descr->num_elements = j;
1961                 mem_descr->size_in_bytes = phba->mem_req[i];
1962                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
1963                                                GFP_KERNEL);
1964                 if (!mem_descr->mem_array)
1965                         goto free_mem;
1966
1967                 memcpy(mem_descr->mem_array, mem_arr_orig,
1968                        sizeof(struct mem_array) * j);
1969                 mem_descr++;
1970         }
1971         kfree(mem_arr_orig);
1972         return 0;
1973 free_mem:
1974         mem_descr->num_elements = j;
1975         while ((i) || (j)) {
1976                 for (j = mem_descr->num_elements; j > 0; j--) {
1977                         pci_free_consistent(phba->pcidev,
1978                                             mem_descr->mem_array[j - 1].size,
1979                                             mem_descr->mem_array[j - 1].
1980                                             virtual_address,
1981                                             mem_descr->mem_array[j - 1].
1982                                             bus_address.u.a64.address);
1983                 }
1984                 if (i) {
1985                         i--;
1986                         kfree(mem_descr->mem_array);
1987                         mem_descr--;
1988                 }
1989         }
1990         kfree(mem_arr_orig);
1991         kfree(phba->init_mem);
1992         kfree(phba->phwi_ctrlr);
1993         return -ENOMEM;
1994 }
1995
1996 static int beiscsi_get_memory(struct beiscsi_hba *phba)
1997 {
1998         beiscsi_find_mem_req(phba);
1999         return beiscsi_alloc_mem(phba);
2000 }
2001
2002 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2003 {
2004         struct pdu_data_out *pdata_out;
2005         struct pdu_nop_out *pnop_out;
2006         struct be_mem_descriptor *mem_descr;
2007
2008         mem_descr = phba->init_mem;
2009         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2010         pdata_out =
2011             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2012         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2013
2014         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2015                       IIOC_SCSI_DATA);
2016
2017         pnop_out =
2018             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2019                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2020
2021         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2022         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2023         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2024         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2025 }
2026
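     /*
      * beiscsi_init_wrb_handle - carve the HWI_MEM_WRBH and HWI_MEM_WRB
      * regions into per-connection WRB handle arrays and attach each handle
      * to its hardware WRB, moving to the next memory fragment whenever the
      * current one runs out.
      */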
2027 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2028 {
2029         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2030         struct wrb_handle *pwrb_handle;
2031         struct hwi_controller *phwi_ctrlr;
2032         struct hwi_wrb_context *pwrb_context;
2033         struct iscsi_wrb *pwrb;
2034         unsigned int num_cxn_wrbh;
2035         unsigned int num_cxn_wrb, j, idx, index;
2036
2037         mem_descr_wrbh = phba->init_mem;
2038         mem_descr_wrbh += HWI_MEM_WRBH;
2039
2040         mem_descr_wrb = phba->init_mem;
2041         mem_descr_wrb += HWI_MEM_WRB;
2042
2043         idx = 0;
2044         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2045         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2046                         ((sizeof(struct wrb_handle)) *
2047                          phba->params.wrbs_per_cxn));
2048         phwi_ctrlr = phba->phwi_ctrlr;
2049
2050         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2051                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2052                 pwrb_context->pwrb_handle_base =
2053                                 kzalloc(sizeof(struct wrb_handle *) *
2054                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2055                 pwrb_context->pwrb_handle_basestd =
2056                                 kzalloc(sizeof(struct wrb_handle *) *
2057                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2058                 if (num_cxn_wrbh) {
2059                         pwrb_context->alloc_index = 0;
2060                         pwrb_context->wrb_handles_available = 0;
2061                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2062                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2063                                 pwrb_context->pwrb_handle_basestd[j] =
2064                                                                 pwrb_handle;
2065                                 pwrb_context->wrb_handles_available++;
2066                                 pwrb_handle->wrb_index = j;
2067                                 pwrb_handle++;
2068                         }
2069                         pwrb_context->free_index = 0;
2070                         num_cxn_wrbh--;
2071                 } else {
2072                         idx++;
2073                         pwrb_handle =
2074                             mem_descr_wrbh->mem_array[idx].virtual_address;
2075                         num_cxn_wrbh =
2076                             ((mem_descr_wrbh->mem_array[idx].size) /
2077                              ((sizeof(struct wrb_handle)) *
2078                               phba->params.wrbs_per_cxn));
2079                         pwrb_context->alloc_index = 0;
                             pwrb_context->wrb_handles_available = 0;
2080                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2081                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2082                                 pwrb_context->pwrb_handle_basestd[j] =
2083                                     pwrb_handle;
2084                                 pwrb_context->wrb_handles_available++;
2085                                 pwrb_handle->wrb_index = j;
2086                                 pwrb_handle++;
2087                         }
2088                         pwrb_context->free_index = 0;
2089                         num_cxn_wrbh--;
2090                 }
2091         }
2092         idx = 0;
2093         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2094         num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
2095                        ((sizeof(struct iscsi_wrb)) *
2096                         phba->params.wrbs_per_cxn));
2097
2098         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2099                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2100                 if (num_cxn_wrb) {
2101                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2102                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2103                                 pwrb_handle->pwrb = pwrb;
2104                                 pwrb++;
2105                         }
2106                         num_cxn_wrb--;
2107                 } else {
2108                         idx++;
2109                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2110                         num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
2111                                         ((sizeof(struct iscsi_wrb)) *
2112                                          phba->params.wrbs_per_cxn));
2113                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2114                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2115                                 pwrb_handle->pwrb = pwrb;
2116                                 pwrb++;
2117                         }
2118                         num_cxn_wrb--;
2119                 }
2120         }
2121 }
2122
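     /*
      * hwi_init_async_pdu_ctx - point the async PDU context at the header
      * and data buffer, ring and handle regions allocated earlier, then
      * seed the free lists with one handle per asyncpdus_per_ctrl entry.
      */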
2123 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2124 {
2125         struct hwi_controller *phwi_ctrlr;
2126         struct hba_parameters *p = &phba->params;
2127         struct hwi_async_pdu_context *pasync_ctx;
2128         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2129         unsigned int index;
2130         struct be_mem_descriptor *mem_descr;
2131
2132         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2133         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2134
2135         phwi_ctrlr = phba->phwi_ctrlr;
2136         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2137                                 mem_descr->mem_array[0].virtual_address;
2138         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2139         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2140
2141         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2142         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2143         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2144         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2145
2146         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2147         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2148         if (mem_descr->mem_array[0].virtual_address) {
2149                 SE_DEBUG(DBG_LVL_8,
2150                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF "
2151                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2152         } else
2153                 shost_printk(KERN_WARNING, phba->shost,
2154                              "No Virtual address \n");
2155
2156         pasync_ctx->async_header.va_base =
2157                         mem_descr->mem_array[0].virtual_address;
2158
2159         pasync_ctx->async_header.pa_base.u.a64.address =
2160                         mem_descr->mem_array[0].bus_address.u.a64.address;
2161
2162         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2163         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2164         if (mem_descr->mem_array[0].virtual_address) {
2165                 SE_DEBUG(DBG_LVL_8,
2166                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING "
2167                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2168         } else
2169                 shost_printk(KERN_WARNING, phba->shost,
2170                             "No Virtual address \n");
2171         pasync_ctx->async_header.ring_base =
2172                         mem_descr->mem_array[0].virtual_address;
2173
2174         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2175         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2176         if (mem_descr->mem_array[0].virtual_address) {
2177                 SE_DEBUG(DBG_LVL_8,
2178                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE "
2179                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2180         } else
2181                 shost_printk(KERN_WARNING, phba->shost,
2182                             "No Virtual address \n");
2183
2184         pasync_ctx->async_header.handle_base =
2185                         mem_descr->mem_array[0].virtual_address;
2186         pasync_ctx->async_header.writables = 0;
2187         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2188
2189         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2190         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2191         if (mem_descr->mem_array[0].virtual_address) {
2192                 SE_DEBUG(DBG_LVL_8,
2193                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF "
2194                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2195         } else
2196                 shost_printk(KERN_WARNING, phba->shost,
2197                             "No Virtual address \n");
2198         pasync_ctx->async_data.va_base =
2199                         mem_descr->mem_array[0].virtual_address;
2200         pasync_ctx->async_data.pa_base.u.a64.address =
2201                         mem_descr->mem_array[0].bus_address.u.a64.address;
2202
2203         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2204         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2205         if (mem_descr->mem_array[0].virtual_address) {
2206                 SE_DEBUG(DBG_LVL_8,
2207                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING "
2208                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2209         } else
2210                 shost_printk(KERN_WARNING, phba->shost,
2211                              "No Virtual address \n");
2212
2213         pasync_ctx->async_data.ring_base =
2214                         mem_descr->mem_array[0].virtual_address;
2215
2216         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2217         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2218         if (!mem_descr->mem_array[0].virtual_address)
2219                 shost_printk(KERN_WARNING, phba->shost,
2220                             "No Virtual address \n");
2221
2222         pasync_ctx->async_data.handle_base =
2223                         mem_descr->mem_array[0].virtual_address;
2224         pasync_ctx->async_data.writables = 0;
2225         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2226
2227         pasync_header_h =
2228                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2229         pasync_data_h =
2230                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2231
2232         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2233                 pasync_header_h->cri = -1;
2234                 pasync_header_h->index = (char)index;
2235                 INIT_LIST_HEAD(&pasync_header_h->link);
2236                 pasync_header_h->pbuffer =
2237                         (void *)((unsigned long)
2238                         (pasync_ctx->async_header.va_base) +
2239                         (p->defpdu_hdr_sz * index));
2240
2241                 pasync_header_h->pa.u.a64.address =
2242                         pasync_ctx->async_header.pa_base.u.a64.address +
2243                         (p->defpdu_hdr_sz * index);
2244
2245                 list_add_tail(&pasync_header_h->link,
2246                                 &pasync_ctx->async_header.free_list);
2247                 pasync_header_h++;
2248                 pasync_ctx->async_header.free_entries++;
2249                 pasync_ctx->async_header.writables++;
2250
2251                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2252                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2253                                header_busy_list);
2254                 pasync_data_h->cri = -1;
2255                 pasync_data_h->index = (char)index;
2256                 INIT_LIST_HEAD(&pasync_data_h->link);
2257                 pasync_data_h->pbuffer =
2258                         (void *)((unsigned long)
2259                         (pasync_ctx->async_data.va_base) +
2260                         (p->defpdu_data_sz * index));
2261
2262                 pasync_data_h->pa.u.a64.address =
2263                     pasync_ctx->async_data.pa_base.u.a64.address +
2264                     (p->defpdu_data_sz * index);
2265
2266                 list_add_tail(&pasync_data_h->link,
2267                               &pasync_ctx->async_data.free_list);
2268                 pasync_data_h++;
2269                 pasync_ctx->async_data.free_entries++;
2270                 pasync_ctx->async_data.writables++;
2271
2272                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2273         }
2274
2275         pasync_ctx->async_header.host_write_ptr = 0;
2276         pasync_ctx->async_header.ep_read_ptr = -1;
2277         pasync_ctx->async_data.host_write_ptr = 0;
2278         pasync_ctx->async_data.ep_read_ptr = -1;
2279 }
2280
2281 static int
2282 be_sgl_create_contiguous(void *virtual_address,
2283                          u64 physical_address, u32 length,
2284                          struct be_dma_mem *sgl)
2285 {
2286         WARN_ON(!virtual_address);
2287         WARN_ON(!physical_address);
2288         WARN_ON(!length);
2289         WARN_ON(!sgl);
2290
2291         sgl->va = virtual_address;
2292         sgl->dma = physical_address;
2293         sgl->size = length;
2294
2295         return 0;
2296 }
2297
2298 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2299 {
2300         memset(sgl, 0, sizeof(*sgl));
2301 }
2302
2303 static void
2304 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2305                      struct mem_array *pmem, struct be_dma_mem *sgl)
2306 {
2307         if (sgl->va)
2308                 be_sgl_destroy_contiguous(sgl);
2309
2310         be_sgl_create_contiguous(pmem->virtual_address,
2311                                  pmem->bus_address.u.a64.address,
2312                                  pmem->size, sgl);
2313 }
2314
2315 static void
2316 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2317                            struct mem_array *pmem, struct be_dma_mem *sgl)
2318 {
2319         if (sgl->va)
2320                 be_sgl_destroy_contiguous(sgl);
2321
2322         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2323                                  pmem->bus_address.u.a64.address,
2324                                  pmem->size, sgl);
2325 }
2326
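     /* Initialise a be_queue_info over an already allocated ring buffer. */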
2327 static int be_fill_queue(struct be_queue_info *q,
2328                 u16 len, u16 entry_size, void *vaddress)
2329 {
2330         struct be_dma_mem *mem = &q->dma_mem;
2331
2332         memset(q, 0, sizeof(*q));
2333         q->len = len;
2334         q->entry_size = entry_size;
2335         mem->size = len * entry_size;
2336         mem->va = vaddress;
2337         if (!mem->va)
2338                 return -ENOMEM;
2339         memset(mem->va, 0, mem->size);
2340         return 0;
2341 }
2342
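     /*
      * beiscsi_create_eqs - allocate and create one event queue per CPU,
      * plus one more for the MCC when MSI-X is enabled.  On failure every
      * EQ ring allocated so far is freed again.
      */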
2343 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2344                              struct hwi_context_memory *phwi_context)
2345 {
2346         unsigned int i, num_eq_pages;
2347         int ret, eq_for_mcc;
2348         struct be_queue_info *eq;
2349         struct be_dma_mem *mem;
2350         void *eq_vaddress;
2351         dma_addr_t paddr;
2352
2353         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2354                                       sizeof(struct be_eq_entry));
2355
2356         if (phba->msix_enabled)
2357                 eq_for_mcc = 1;
2358         else
2359                 eq_for_mcc = 0;
2360         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2361                 eq = &phwi_context->be_eq[i].q;
2362                 mem = &eq->dma_mem;
2363                 phwi_context->be_eq[i].phba = phba;
2364                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2365                                                      num_eq_pages * PAGE_SIZE,
2366                                                      &paddr);
2367                 if (!eq_vaddress)
2368                         goto create_eq_error;
2369
2370                 mem->va = eq_vaddress;
2371                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2372                                     sizeof(struct be_eq_entry), eq_vaddress);
2373                 if (ret) {
2374                         shost_printk(KERN_ERR, phba->shost,
2375                                      "be_fill_queue Failed for EQ \n");
2376                         goto create_eq_error;
2377                 }
2378
2379                 mem->dma = paddr;
2380                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2381                                             phwi_context->cur_eqd);
2382                 if (ret) {
2383                         shost_printk(KERN_ERR, phba->shost,
2384                                      "beiscsi_cmd_eq_create "
2385                                      "Failed for EQ\n");
2386                         goto create_eq_error;
2387                 }
2388                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2389         }
2390         return 0;
2391 create_eq_error:
2392         for (i = 0; i < (phba->num_cpus + 1); i++) {
2393                 eq = &phwi_context->be_eq[i].q;
2394                 mem = &eq->dma_mem;
2395                 if (mem->va)
2396                         pci_free_consistent(phba->pcidev, num_eq_pages
2397                                             * PAGE_SIZE,
2398                                             mem->va, mem->dma);
2399         }
2400         return ret;
2401 }
2402
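     /*
      * beiscsi_create_cqs - allocate and create one I/O completion queue
      * per CPU and bind it to the matching event queue object.
      */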
2403 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2404                              struct hwi_context_memory *phwi_context)
2405 {
2406         unsigned int i, num_cq_pages;
2407         int ret;
2408         struct be_queue_info *cq, *eq;
2409         struct be_dma_mem *mem;
2410         struct be_eq_obj *pbe_eq;
2411         void *cq_vaddress;
2412         dma_addr_t paddr;
2413
2414         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2415                                       sizeof(struct sol_cqe));
2416
2417         for (i = 0; i < phba->num_cpus; i++) {
2418                 cq = &phwi_context->be_cq[i];
2419                 eq = &phwi_context->be_eq[i].q;
2420                 pbe_eq = &phwi_context->be_eq[i];
2421                 pbe_eq->cq = cq;
2422                 pbe_eq->phba = phba;
2423                 mem = &cq->dma_mem;
2424                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2425                                                      num_cq_pages * PAGE_SIZE,
2426                                                      &paddr);
2427                 if (!cq_vaddress)
2428                         goto create_cq_error;
2429                 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2430                                     sizeof(struct sol_cqe), cq_vaddress);
2431                 if (ret) {
2432                         shost_printk(KERN_ERR, phba->shost,
2433                                      "be_fill_queue Failed for ISCSI CQ \n");
2434                         goto create_cq_error;
2435                 }
2436
2437                 mem->dma = paddr;
2438                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2439                                             false, 0);
2440                 if (ret) {
2441                         shost_printk(KERN_ERR, phba->shost,
2442                                      "beiscsi_cmd_cq_create "
2443                                      "Failed for ISCSI CQ\n");
2444                         goto create_cq_error;
2445                 }
2446                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2447                                                  cq->id, eq->id);
2448                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2449         }
2450         return 0;
2451
2452 create_cq_error:
2453         for (i = 0; i < phba->num_cpus; i++) {
2454                 cq = &phwi_context->be_cq[i];
2455                 mem = &cq->dma_mem;
2456                 if (mem->va)
2457                         pci_free_consistent(phba->pcidev, num_cq_pages
2458                                             * PAGE_SIZE,
2459                                             mem->va, mem->dma);
2460         }
2461         return ret;
2462
2463 }
2464
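     /*
      * beiscsi_create_def_hdr - set up the default PDU header ring from
      * the HWI_MEM_ASYNC_HEADER_RING descriptor, create it against CQ 0
      * with be_cmd_create_default_pdu_queue() and post the async
      * buffers for the header ring.
      */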
2465 static int
2466 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2467                        struct hwi_context_memory *phwi_context,
2468                        struct hwi_controller *phwi_ctrlr,
2469                        unsigned int def_pdu_ring_sz)
2470 {
2471         unsigned int idx;
2472         int ret;
2473         struct be_queue_info *dq, *cq;
2474         struct be_dma_mem *mem;
2475         struct be_mem_descriptor *mem_descr;
2476         void *dq_vaddress;
2477
2478         idx = 0;
2479         dq = &phwi_context->be_def_hdrq;
2480         cq = &phwi_context->be_cq[0];
2481         mem = &dq->dma_mem;
2482         mem_descr = phba->init_mem;
2483         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2484         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2485         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2486                             sizeof(struct phys_addr),
2487                             sizeof(struct phys_addr), dq_vaddress);
2488         if (ret) {
2489                 shost_printk(KERN_ERR, phba->shost,
2490                              "be_fill_queue Failed for DEF PDU HDR\n");
2491                 return ret;
2492         }
2493         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2494         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2495                                               def_pdu_ring_sz,
2496                                               phba->params.defpdu_hdr_sz);
2497         if (ret) {
2498                 shost_printk(KERN_ERR, phba->shost,
2499                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2500                 return ret;
2501         }
2502         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2503         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2504                  phwi_context->be_def_hdrq.id);
2505         hwi_post_async_buffers(phba, 1);
2506         return 0;
2507 }
2508
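     /*
      * beiscsi_create_def_data - set up the default PDU data ring from
      * the HWI_MEM_ASYNC_DATA_RING descriptor, create it against CQ 0
      * and post the async buffers for the data ring.
      */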
2509 static int
2510 beiscsi_create_def_data(struct beiscsi_hba *phba,
2511                         struct hwi_context_memory *phwi_context,
2512                         struct hwi_controller *phwi_ctrlr,
2513                         unsigned int def_pdu_ring_sz)
2514 {
2515         unsigned int idx;
2516         int ret;
2517         struct be_queue_info *dataq, *cq;
2518         struct be_dma_mem *mem;
2519         struct be_mem_descriptor *mem_descr;
2520         void *dq_vaddress;
2521
2522         idx = 0;
2523         dataq = &phwi_context->be_def_dataq;
2524         cq = &phwi_context->be_cq[0];
2525         mem = &dataq->dma_mem;
2526         mem_descr = phba->init_mem;
2527         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2528         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2529         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2530                             sizeof(struct phys_addr),
2531                             sizeof(struct phys_addr), dq_vaddress);
2532         if (ret) {
2533                 shost_printk(KERN_ERR, phba->shost,
2534                              "be_fill_queue Failed for DEF PDU DATA\n");
2535                 return ret;
2536         }
2537         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2538         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2539                                               def_pdu_ring_sz,
2540                                               phba->params.defpdu_data_sz);
2541         if (ret) {
2542                 shost_printk(KERN_ERR, phba->shost,
2543                              "be_cmd_create_default_pdu_queue Failed"
2544                              " for DEF PDU DATA\n");
2545                 return ret;
2546         }
2547         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2548         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2549                  phwi_context->be_def_dataq.id);
2550         hwi_post_async_buffers(phba, 0);
2551         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2552         return 0;
2553 }
2554
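     /*
      * beiscsi_post_pages - post the ICD SGE area (HWI_MEM_SGE) to the
      * adapter, one mem_array element at a time, starting at the page
      * offset that corresponds to the firmware's iscsi_icd_start.
      */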
2555 static int
2556 beiscsi_post_pages(struct beiscsi_hba *phba)
2557 {
2558         struct be_mem_descriptor *mem_descr;
2559         struct mem_array *pm_arr;
2560         unsigned int page_offset, i;
2561         struct be_dma_mem sgl;
2562         int status;
2563
2564         mem_descr = phba->init_mem;
2565         mem_descr += HWI_MEM_SGE;
2566         pm_arr = mem_descr->mem_array;
2567
2568         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2569                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2570         for (i = 0; i < mem_descr->num_elements; i++) {
2571                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2572                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2573                                                 page_offset,
2574                                                 (pm_arr->size / PAGE_SIZE));
2575                 page_offset += pm_arr->size / PAGE_SIZE;
2576                 if (status != 0) {
2577                         shost_printk(KERN_ERR, phba->shost,
2578                                      "post sgl failed.\n");
2579                         return status;
2580                 }
2581                 pm_arr++;
2582         }
2583         SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2584         return 0;
2585 }
2586
2587 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2588 {
2589         struct be_dma_mem *mem = &q->dma_mem;
2590         if (mem->va)
2591                 pci_free_consistent(phba->pcidev, mem->size,
2592                         mem->va, mem->dma);
2593 }
2594
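     /* Allocate and zero the DMA-coherent ring backing a generic BE queue. */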
2595 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2596                 u16 len, u16 entry_size)
2597 {
2598         struct be_dma_mem *mem = &q->dma_mem;
2599
2600         memset(q, 0, sizeof(*q));
2601         q->len = len;
2602         q->entry_size = entry_size;
2603         mem->size = len * entry_size;
2604         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2605         if (!mem->va)
2606                 return -1;
2607         memset(mem->va, 0, mem->size);
2608         return 0;
2609 }
2610
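     /*
      * beiscsi_create_wrb_rings - carve the HWI_MEM_WRB area into one
      * WRB ring of wrbs_per_cxn entries per connection, moving to the
      * next mem_array element whenever the current one is exhausted,
      * then create each ring with be_cmd_wrbq_create() and record the
      * returned queue id as the cid of the matching WRB context.
      */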
2611 static int
2612 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2613                          struct hwi_context_memory *phwi_context,
2614                          struct hwi_controller *phwi_ctrlr)
2615 {
2616         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2617         u64 pa_addr_lo;
2618         unsigned int idx, num, i;
2619         struct mem_array *pwrb_arr;
2620         void *wrb_vaddr;
2621         struct be_dma_mem sgl;
2622         struct be_mem_descriptor *mem_descr;
2623         int status;
2624
2625         idx = 0;
2626         mem_descr = phba->init_mem;
2627         mem_descr += HWI_MEM_WRB;
2628         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2629                            GFP_KERNEL);
2630         if (!pwrb_arr) {
2631                 shost_printk(KERN_ERR, phba->shost,
2632                              "Memory alloc failed in create wrb ring.\n");
2633                 return -ENOMEM;
2634         }
2635         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2636         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2637         num_wrb_rings = mem_descr->mem_array[idx].size /
2638                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2639
2640         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2641                 if (num_wrb_rings) {
2642                         pwrb_arr[num].virtual_address = wrb_vaddr;
2643                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2644                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2645                                             sizeof(struct iscsi_wrb);
2646                         wrb_vaddr += pwrb_arr[num].size;
2647                         pa_addr_lo += pwrb_arr[num].size;
2648                         num_wrb_rings--;
2649                 } else {
2650                         idx++;
2651                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2652                         pa_addr_lo = mem_descr->mem_array[idx].
2653                                         bus_address.u.a64.address;
2654                         num_wrb_rings = mem_descr->mem_array[idx].size /
2655                                         (phba->params.wrbs_per_cxn *
2656                                         sizeof(struct iscsi_wrb));
2657                         pwrb_arr[num].virtual_address = wrb_vaddr;
2658                         pwrb_arr[num].bus_address.u.a64.address =
2659                                                 pa_addr_lo;
2660                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2661                                                  sizeof(struct iscsi_wrb);
2662                         wrb_vaddr += pwrb_arr[num].size;
2663                         pa_addr_lo += pwrb_arr[num].size;
2664                         num_wrb_rings--;
2665                 }
2666         }
2667         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2668                 wrb_mem_index = 0;
2669                 offset = 0;
2670                 size = 0;
2671
2672                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2673                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2674                                             &phwi_context->be_wrbq[i]);
2675                 if (status != 0) {
2676                         shost_printk(KERN_ERR, phba->shost,
2677                                      "wrbq create failed.\n");
                             kfree(pwrb_arr);
2678                         return status;
2679                 }
2680                 phwi_ctrlr->wrb_context[i * 2].cid =
2681                                 phwi_context->be_wrbq[i].id;
2682         }
2683         kfree(pwrb_arr);
2684         return 0;
2685 }
2686
2687 static void free_wrb_handles(struct beiscsi_hba *phba)
2688 {
2689         unsigned int index;
2690         struct hwi_controller *phwi_ctrlr;
2691         struct hwi_wrb_context *pwrb_context;
2692
2693         phwi_ctrlr = phba->phwi_ctrlr;
2694         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2695                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2696                 kfree(pwrb_context->pwrb_handle_base);
2697                 kfree(pwrb_context->pwrb_handle_basestd);
2698         }
2699 }
2700
2701 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2702 {
2703         struct be_queue_info *q;
2704         struct be_ctrl_info *ctrl = &phba->ctrl;
2705
2706         q = &phba->ctrl.mcc_obj.q;
2707         if (q->created)
2708                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2709         be_queue_free(phba, q);
2710
2711         q = &phba->ctrl.mcc_obj.cq;
2712         if (q->created)
2713                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2714         be_queue_free(phba, q);
2715 }
2716
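     /*
      * hwi_cleanup - destroy everything created by hwi_init_port(): the
      * per-connection WRB queues and their handle arrays, the default
      * PDU header/data queues, the posted SGL pages, the per-CPU CQs
      * and EQs, and finally the MCC queues.
      */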
2717 static void hwi_cleanup(struct beiscsi_hba *phba)
2718 {
2719         struct be_queue_info *q;
2720         struct be_ctrl_info *ctrl = &phba->ctrl;
2721         struct hwi_controller *phwi_ctrlr;
2722         struct hwi_context_memory *phwi_context;
2723         int i, eq_num;
2724
2725         phwi_ctrlr = phba->phwi_ctrlr;
2726         phwi_context = phwi_ctrlr->phwi_ctxt;
2727         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2728                 q = &phwi_context->be_wrbq[i];
2729                 if (q->created)
2730                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2731         }
2732         free_wrb_handles(phba);
2733
2734         q = &phwi_context->be_def_hdrq;
2735         if (q->created)
2736                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2737
2738         q = &phwi_context->be_def_dataq;
2739         if (q->created)
2740                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2741
2742         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2743
2744         for (i = 0; i < (phba->num_cpus); i++) {
2745                 q = &phwi_context->be_cq[i];
2746                 if (q->created)
2747                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2748         }
2749         if (phba->msix_enabled)
2750                 eq_num = 1;
2751         else
2752                 eq_num = 0;
2753         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2754                 q = &phwi_context->be_eq[i].q;
2755                 if (q->created)
2756                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2757         }
2758         be_mcc_queues_destroy(phba);
2759 }
2760
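     /*
      * be_mcc_queues_create - allocate and create the MCC completion
      * queue (bound to the extra EQ when MSI-X is enabled, EQ 0
      * otherwise) and the MCC work queue, unwinding on any failure.
      */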
2761 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2762                                 struct hwi_context_memory *phwi_context)
2763 {
2764         struct be_queue_info *q, *cq;
2765         struct be_ctrl_info *ctrl = &phba->ctrl;
2766
2767         /* Alloc MCC compl queue */
2768         cq = &phba->ctrl.mcc_obj.cq;
2769         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2770                         sizeof(struct be_mcc_compl)))
2771                 goto err;
2772         /* Ask BE to create MCC compl queue */
2773         if (phba->msix_enabled) {
2774                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2775                                          [phba->num_cpus].q, false, true, 0))
2776                         goto mcc_cq_free;
2777         } else {
2778                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2779                                           false, true, 0))
2780                         goto mcc_cq_free;
2781         }
2782
2783         /* Alloc MCC queue */
2784         q = &phba->ctrl.mcc_obj.q;
2785         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2786                 goto mcc_cq_destroy;
2787
2788         /* Ask BE to create MCC queue */
2789         if (beiscsi_cmd_mccq_create(phba, q, cq))
2790                 goto mcc_q_free;
2791
2792         return 0;
2793
2794 mcc_q_free:
2795         be_queue_free(phba, q);
2796 mcc_cq_destroy:
2797         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2798 mcc_cq_free:
2799         be_queue_free(phba, cq);
2800 err:
2801         return -1;
2802 }
2803
2804 static int find_num_cpus(void)
2805 {
2806         int  num_cpus = 0;
2807
2808         num_cpus = num_online_cpus();
2809         if (num_cpus >= MAX_CPUS)
2810                 num_cpus = MAX_CPUS - 1;
2811
2812         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2813         return num_cpus;
2814 }
2815
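     /*
      * hwi_init_port - bring the adapter queues up in order: firmware
      * init, EQs, MCC queues, firmware version check, CQs, default PDU
      * header and data rings, SGL page posting and WRB rings.  Any
      * failure tears the port down again through hwi_cleanup().
      */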
2816 static int hwi_init_port(struct beiscsi_hba *phba)
2817 {
2818         struct hwi_controller *phwi_ctrlr;
2819         struct hwi_context_memory *phwi_context;
2820         unsigned int def_pdu_ring_sz;
2821         struct be_ctrl_info *ctrl = &phba->ctrl;
2822         int status;
2823
2824         def_pdu_ring_sz =
2825                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2826         phwi_ctrlr = phba->phwi_ctrlr;
2827         phwi_context = phwi_ctrlr->phwi_ctxt;
2828         phwi_context->max_eqd = 0;
2829         phwi_context->min_eqd = 0;
2830         phwi_context->cur_eqd = 64;
2831         be_cmd_fw_initialize(&phba->ctrl);
2832
2833         status = beiscsi_create_eqs(phba, phwi_context);
2834         if (status != 0) {
2835                 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2836                 goto error;
2837         }
2838
2839         status = be_mcc_queues_create(phba, phwi_context);
2840         if (status != 0)
2841                 goto error;
2842
2843         status = mgmt_check_supported_fw(ctrl, phba);
2844         if (status != 0) {
2845                 shost_printk(KERN_ERR, phba->shost,
2846                              "Unsupported fw version \n");
2847                 goto error;
2848         }
2849
2850         if (phba->fw_config.iscsi_features == 0x1)
2851                 ring_mode = 1;
2852         else
2853                 ring_mode = 0;
2854
2855         status = beiscsi_create_cqs(phba, phwi_context);
2856         if (status != 0) {
2857                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2858                 goto error;
2859         }
2860
2861         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2862                                         def_pdu_ring_sz);
2863         if (status != 0) {
2864                 shost_printk(KERN_ERR, phba->shost,
2865                              "Default Header not created\n");
2866                 goto error;
2867         }
2868
2869         status = beiscsi_create_def_data(phba, phwi_context,
2870                                          phwi_ctrlr, def_pdu_ring_sz);
2871         if (status != 0) {
2872                 shost_printk(KERN_ERR, phba->shost,
2873                              "Default Data not created\n");
2874                 goto error;
2875         }
2876
2877         status = beiscsi_post_pages(phba);
2878         if (status != 0) {
2879                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2880                 goto error;
2881         }
2882
2883         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2884         if (status != 0) {
2885                 shost_printk(KERN_ERR, phba->shost,
2886                              "WRB Rings not created\n");
2887                 goto error;
2888         }
2889
2890         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2891         return 0;
2892
2893 error:
2894         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2895         hwi_cleanup(phba);
2896         return -ENOMEM;
2897 }
2898
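     /*
      * hwi_init_controller - point phwi_ctxt at the single
      * HWI_MEM_ADDN_CONTEXT region, initialise the global templates,
      * WRB handles and async PDU context, then bring up the port.
      */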
2899 static int hwi_init_controller(struct beiscsi_hba *phba)
2900 {
2901         struct hwi_controller *phwi_ctrlr;
2902
2903         phwi_ctrlr = phba->phwi_ctrlr;
2904         if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2905                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2906                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2907                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2908                          phwi_ctrlr->phwi_ctxt);
2909         } else {
2910                 shost_printk(KERN_ERR, phba->shost,
2911                              "HWI_MEM_ADDN_CONTEXT is more than one element. "
2912                              "Failing to load\n");
2913                 return -ENOMEM;
2914         }
2915
2916         iscsi_init_global_templates(phba);
2917         beiscsi_init_wrb_handle(phba);
2918         hwi_init_async_pdu_ctx(phba);
2919         if (hwi_init_port(phba) != 0) {
2920                 shost_printk(KERN_ERR, phba->shost,
2921                              "hwi_init_controller failed\n");
2922                 return -ENOMEM;
2923         }
2924         return 0;
2925 }
2926
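     /*
      * beiscsi_free_mem - free every DMA region described by the
      * SE_MEM_MAX init_mem descriptors along with their mem_array
      * tables, then the init_mem and phwi_ctrlr allocations themselves.
      */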
2927 static void beiscsi_free_mem(struct beiscsi_hba *phba)
2928 {
2929         struct be_mem_descriptor *mem_descr;
2930         int i, j;
2931
2932         mem_descr = phba->init_mem;
2933         i = 0;
2934         j = 0;
2935         for (i = 0; i < SE_MEM_MAX; i++) {
2936                 for (j = mem_descr->num_elements; j > 0; j--) {
2937                         pci_free_consistent(phba->pcidev,
2938                           mem_descr->mem_array[j - 1].size,
2939                           mem_descr->mem_array[j - 1].virtual_address,
2940                           mem_descr->mem_array[j - 1].bus_address.
2941                                 u.a64.address);
2942                 }
2943                 kfree(mem_descr->mem_array);
2944                 mem_descr++;
2945         }
2946         kfree(phba->init_mem);
2947         kfree(phba->phwi_ctrlr);
2948 }
2949
2950 static int beiscsi_init_controller(struct beiscsi_hba *phba)
2951 {
2952         int ret = -ENOMEM;
2953
2954         ret = beiscsi_get_memory(phba);
2955         if (ret < 0) {
2956                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
2957                              "Failed in beiscsi_get_memory\n");
2958                 return ret;
2959         }
2960
2961         ret = hwi_init_controller(phba);
2962         if (ret)
2963                 goto free_init;
2964         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
2965         return 0;
2966
2967 free_init:
2968         beiscsi_free_mem(phba);
2969         return -ENOMEM;
2970 }
2971
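     /*
      * beiscsi_init_sgl_handle - build the I/O and error-handling SGL
      * handle pools.  The handle structures live in HWI_MEM_SGLH and
      * the per-handle SGE fragments in HWI_MEM_SGE; the first
      * ios_per_ctrl handles serve I/O, the remaining ICDs back the eh
      * pool.
      */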
2972 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2973 {
2974         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2975         struct sgl_handle *psgl_handle;
2976         struct iscsi_sge *pfrag;
2977         unsigned int arr_index, i, idx;
2978
2979         phba->io_sgl_hndl_avbl = 0;
2980         phba->eh_sgl_hndl_avbl = 0;
2981
2982         if (ring_mode) {
2983                 phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
2984                                               phba->params.icds_per_ctrl,
2985                                                  GFP_KERNEL);
2986                 if (!phba->sgl_hndl_array) {
2987                         shost_printk(KERN_ERR, phba->shost,
2988                              "Mem Alloc Failed. Failing to load\n");
2989                         return -ENOMEM;
2990                 }
2991         }
2992
2993         mem_descr_sglh = phba->init_mem;
2994         mem_descr_sglh += HWI_MEM_SGLH;
2995         if (1 == mem_descr_sglh->num_elements) {
2996                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2997                                                  phba->params.ios_per_ctrl,
2998                                                  GFP_KERNEL);
2999                 if (!phba->io_sgl_hndl_base) {
3000                         if (ring_mode)
3001                                 kfree(phba->sgl_hndl_array);
3002                         shost_printk(KERN_ERR, phba->shost,
3003                                      "Mem Alloc Failed. Failing to load\n");
3004                         return -ENOMEM;
3005                 }
3006                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3007                                                  (phba->params.icds_per_ctrl -
3008                                                  phba->params.ios_per_ctrl),
3009                                                  GFP_KERNEL);
3010                 if (!phba->eh_sgl_hndl_base) {
3011                         kfree(phba->io_sgl_hndl_base);
3012                         shost_printk(KERN_ERR, phba->shost,
3013                                      "Mem Alloc Failed. Failing to load\n");
3014                         return -ENOMEM;
3015                 }
3016         } else {
3017                 shost_printk(KERN_ERR, phba->shost,
3018                              "HWI_MEM_SGLH is more than one element. "
3019                              "Failing to load\n");
3020                 return -ENOMEM;
3021         }
3022
3023         arr_index = 0;
3024         idx = 0;
3025         while (idx < mem_descr_sglh->num_elements) {
3026                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3027
3028                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3029                       sizeof(struct sgl_handle)); i++) {
3030                         if (arr_index < phba->params.ios_per_ctrl) {
3031                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3032                                 phba->io_sgl_hndl_avbl++;
3033                                 arr_index++;
3034                         } else {
3035                                 phba->eh_sgl_hndl_base[arr_index -
3036                                         phba->params.ios_per_ctrl] =
3037                                                                 psgl_handle;
3038                                 arr_index++;
3039                                 phba->eh_sgl_hndl_avbl++;
3040                         }
3041                         psgl_handle++;
3042                 }
3043                 idx++;
3044         }
3045         SE_DEBUG(DBG_LVL_8,
3046                  "phba->io_sgl_hndl_avbl=%d "
3047                  "phba->eh_sgl_hndl_avbl=%d\n",
3048                  phba->io_sgl_hndl_avbl,
3049                  phba->eh_sgl_hndl_avbl);
3050         mem_descr_sg = phba->init_mem;
3051         mem_descr_sg += HWI_MEM_SGE;
3052         SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
3053                  mem_descr_sg->num_elements);
3054         arr_index = 0;
3055         idx = 0;
3056         while (idx < mem_descr_sg->num_elements) {
3057                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3058
3059                 for (i = 0;
3060                      i < (mem_descr_sg->mem_array[idx].size) /
3061                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3062                      i++) {
3063                         if (arr_index < phba->params.ios_per_ctrl)
3064                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3065                         else
3066                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3067                                                 phba->params.ios_per_ctrl];
3068                         psgl_handle->pfrag = pfrag;
3069                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3070                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3071                         pfrag += phba->params.num_sge_per_io;
3072                         psgl_handle->sgl_index =
3073                                 phba->fw_config.iscsi_icd_start + arr_index++;
3074                 }
3075                 idx++;
3076         }
3077         phba->io_sgl_free_index = 0;
3078         phba->io_sgl_alloc_index = 0;
3079         phba->eh_sgl_free_index = 0;
3080         phba->eh_sgl_alloc_index = 0;
3081         return 0;
3082 }
3083
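     /*
      * hba_setup_cid_tbls - allocate the cid_array and ep_array tables
      * and fill cid_array with connection ids stepping by two from the
      * firmware's iscsi_cid_start.
      */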
3084 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3085 {
3086         int i, new_cid;
3087
3088         phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3089                                   GFP_KERNEL);
3090         if (!phba->cid_array) {
3091                 shost_printk(KERN_ERR, phba->shost,
3092                              "Failed to allocate memory in "
3093                              "hba_setup_cid_tbls\n");
3094                 return -ENOMEM;
3095         }
3096         phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3097                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3098         if (!phba->ep_array) {
3099                 shost_printk(KERN_ERR, phba->shost,
3100                              "Failed to allocate memory in "
3101                              "hba_setup_cid_tbls \n");
3102                 kfree(phba->cid_array);
3103                 return -ENOMEM;
3104         }
3105         new_cid = phba->fw_config.iscsi_cid_start;
3106         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3107                 phba->cid_array[i] = new_cid;
3108                 new_cid += 2;
3109         }
3110         phba->avlbl_cids = phba->params.cxns_per_ctrl;
3111         return 0;
3112 }
3113
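     /*
      * hwi_enable_intr - set the host-interrupt enable bit in the
      * MEMBAR_CTRL_INT_CTRL register if it is not already set and arm
      * the doorbell of every event queue.
      */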
3114 static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3115 {
3116         struct be_ctrl_info *ctrl = &phba->ctrl;
3117         struct hwi_controller *phwi_ctrlr;
3118         struct hwi_context_memory *phwi_context;
3119         struct be_queue_info *eq;
3120         u8 __iomem *addr;
3121         u32 reg, i;
3122         u32 enabled;
3123
3124         phwi_ctrlr = phba->phwi_ctrlr;
3125         phwi_context = phwi_ctrlr->phwi_ctxt;
3126
3127         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3128                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3129         reg = ioread32(addr);
3130         SE_DEBUG(DBG_LVL_8, "reg = 0x%08x\n", reg);
3131
3132         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3133         if (!enabled) {
3134                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3135                 SE_DEBUG(DBG_LVL_8, "reg = 0x%08x addr=%p\n", reg, addr);
3136                 iowrite32(reg, addr);
3137                 for (i = 0; i <= phba->num_cpus; i++) {
3138                         eq = &phwi_context->be_eq[i].q;
3139                         SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3140                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3141                 }
3142         } else
3143                 shost_printk(KERN_WARNING, phba->shost,
3144                              "In hwi_enable_intr, Not Enabled \n");
3145         return true;
3146 }
3147
3148 static void hwi_disable_intr(struct beiscsi_hba *phba)
3149 {
3150         struct be_ctrl_info *ctrl = &phba->ctrl;
3151
3152         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3153         u32 reg = ioread32(addr);
3154
3155         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3156         if (enabled) {
3157                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3158                 iowrite32(reg, addr);
3159         } else
3160                 shost_printk(KERN_WARNING, phba->shost,
3161                              "In hwi_disable_intr, Already Disabled \n");
3162 }
3163
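     /*
      * beiscsi_init_port - initialise the controller, the SGL handle
      * pools and the CID tables; on failure the hardware context is
      * torn down again through hwi_cleanup().
      */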
3164 static int beiscsi_init_port(struct beiscsi_hba *phba)
3165 {
3166         int ret;
3167
3168         ret = beiscsi_init_controller(phba);
3169         if (ret < 0) {
3170                 shost_printk(KERN_ERR, phba->shost,
3171                              "beiscsi_dev_probe - Failed in "
3172                              "beiscsi_init_controller\n");
3173                 return ret;
3174         }
3175         ret = beiscsi_init_sgl_handle(phba);
3176         if (ret < 0) {
3177                 shost_printk(KERN_ERR, phba->shost,
3178                              "beiscsi_dev_probe - Failed in "
3179                              "beiscsi_init_sgl_handle\n");
3180                 goto do_cleanup_ctrlr;
3181         }
3182
3183         if (hba_setup_cid_tbls(phba)) {
3184                 shost_printk(KERN_ERR, phba->shost,
3185                              "Failed in hba_setup_cid_tbls\n");
3186                 if (ring_mode)
3187                         kfree(phba->sgl_hndl_array);
3188                 kfree(phba->io_sgl_hndl_base);
3189                 kfree(phba->eh_sgl_hndl_base);
3190                 goto do_cleanup_ctrlr;
3191         }
3192
3193         return ret;
3194
3195 do_cleanup_ctrlr:
3196         hwi_cleanup(phba);
3197         return ret;
3198 }
3199
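     /*
      * hwi_purge_eq - walk every event queue, consume any entries that
      * are still marked valid and ring the EQ doorbell to return the
      * processed entries.
      */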
3200 static void hwi_purge_eq(struct beiscsi_hba *phba)
3201 {
3202         struct hwi_controller *phwi_ctrlr;
3203         struct hwi_context_memory *phwi_context;
3204         struct be_queue_info *eq;
3205         struct be_eq_entry *eqe = NULL;
3206         int i, eq_msix;
3207         unsigned int num_processed;
3208
3209         phwi_ctrlr = phba->phwi_ctrlr;
3210         phwi_context = phwi_ctrlr->phwi_ctxt;
3211         if (phba->msix_enabled)
3212                 eq_msix = 1;
3213         else
3214                 eq_msix = 0;
3215
3216         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3217                 eq = &phwi_context->be_eq[i].q;
3218                 eqe = queue_tail_node(eq);
3219                 num_processed = 0;
3220                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3221                                         & EQE_VALID_MASK) {
3222                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3223                         queue_tail_inc(eq);
3224                         eqe = queue_tail_node(eq);
3225                         num_processed++;
3226                 }
3227
3228                 if (num_processed)
3229                         hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3230         }
3231 }
3232
3233 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3234 {
3235         unsigned char mgmt_status;
3236
3237         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3238         if (mgmt_status)
3239                 shost_printk(KERN_WARNING, phba->shost,
3240                              "mgmt_epfw_cleanup FAILED \n");
3241
3242         hwi_purge_eq(phba);
3243         hwi_cleanup(phba);
3244         if (ring_mode)
3245                 kfree(phba->sgl_hndl_array);
3246         kfree(phba->io_sgl_hndl_base);
3247         kfree(phba->eh_sgl_hndl_base);
3248         kfree(phba->cid_array);
3249         kfree(phba->ep_array);
3250 }
3251
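     /*
      * beiscsi_offload_connection - build a context-update WRB carrying
      * the negotiated iSCSI parameters (burst lengths, ERL, digest and
      * R2T/immediate-data flags, exp_statsn) plus the pad buffer
      * address, and ring the TXULP0 doorbell to offload the connection.
      */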
3252 void
3253 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3254                            struct beiscsi_offload_params *params)
3255 {
3256         struct wrb_handle *pwrb_handle;
3257         struct iscsi_target_context_update_wrb *pwrb = NULL;
3258         struct be_mem_descriptor *mem_descr;
3259         struct beiscsi_hba *phba = beiscsi_conn->phba;
3260         u32 doorbell = 0;
3261
3262         /*
3263          * We can always use 0 here because it is reserved by libiscsi for
3264          * login/startup related tasks.
3265          */
3266         pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3267                                        phba->fw_config.iscsi_cid_start));
3268         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3269         memset(pwrb, 0, sizeof(*pwrb));
3270         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3271                       max_burst_length, pwrb, params->dw[offsetof
3272                       (struct amap_beiscsi_offload_params,
3273                       max_burst_length) / 32]);
3274         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3275                       max_send_data_segment_length, pwrb,
3276                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3277                       max_send_data_segment_length) / 32]);
3278         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3279                       first_burst_length,
3280                       pwrb,
3281                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3282                       first_burst_length) / 32]);
3283
3284         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3285                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3286                       erl) / 32] & OFFLD_PARAMS_ERL));
3287         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3288                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3289                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3290         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3291                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3292                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3293         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3294                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3295                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3296         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3297                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3298                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3299         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3300                       pwrb,
3301                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3302                       exp_statsn) / 32] + 1));
3303         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3304                       0x7);
3305         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3306                       pwrb, pwrb_handle->wrb_index);
3307         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3308                       pwrb, pwrb_handle->nxt_wrb_index);
3309         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3310                         session_state, pwrb, 0);
3311         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3312                       pwrb, 1);
3313         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3314                       pwrb, 0);
3315         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3316                       0);
3317
3318         mem_descr = phba->init_mem;
3319         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3320
3321         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3322                         pad_buffer_addr_hi, pwrb,
3323                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3324         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3325                         pad_buffer_addr_lo, pwrb,
3326                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3327
3328         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3329
3330         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3331         if (!ring_mode)
3332                 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3333                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3334         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3335
3336         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3337 }
3338
3339 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3340                               int *index, int *age)
3341 {
3342         *index = (int)itt;
3343         if (age)
3344                 *age = conn->session->age;
3345 }
3346
3347 /**
3348  * beiscsi_alloc_pdu - allocates pdu and related resources
3349  * @task: libiscsi task
3350  * @opcode: opcode of pdu for task
3351  *
3352  * This is called with the session lock held. It will allocate
3353  * the wrb and sgl if needed for the command. And it will prep
3354  * the pdu's itt. beiscsi_parse_pdu will later translate
3355  * the pdu itt to the libiscsi task itt.
3356  */
3357 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3358 {
3359         struct beiscsi_io_task *io_task = task->dd_data;
3360         struct iscsi_conn *conn = task->conn;
3361         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3362         struct beiscsi_hba *phba = beiscsi_conn->phba;
3363         struct hwi_wrb_context *pwrb_context;
3364         struct hwi_controller *phwi_ctrlr;
3365         itt_t itt;
3366         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3367         dma_addr_t paddr;
3368
3369         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3370                                           GFP_KERNEL, &paddr);
3371         if (!io_task->cmd_bhs)
3372                 return -ENOMEM;
3373         io_task->bhs_pa.u.a64.address = paddr;
3374         io_task->libiscsi_itt = (itt_t)task->itt;
3375         io_task->pwrb_handle = alloc_wrb_handle(phba,
3376                                                 beiscsi_conn->beiscsi_conn_cid -
3377                                                 phba->fw_config.iscsi_cid_start
3378                                                 );
3379         io_task->conn = beiscsi_conn;
3380
3381         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3382         task->hdr_max = sizeof(struct be_cmd_bhs);
3383
3384         if (task->sc) {
3385                 spin_lock(&phba->io_sgl_lock);
3386                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3387                 spin_unlock(&phba->io_sgl_lock);
3388                 if (!io_task->psgl_handle)
3389                         goto free_hndls;
3390         } else {
3391                 io_task->scsi_cmnd = NULL;
3392                 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3393                         if (!beiscsi_conn->login_in_progress) {
3394                                 spin_lock(&phba->mgmt_sgl_lock);
3395                                 io_task->psgl_handle = (struct sgl_handle *)
3396                                                 alloc_mgmt_sgl_handle(phba);
3397                                 spin_unlock(&phba->mgmt_sgl_lock);
3398                                 if (!io_task->psgl_handle)
3399                                         goto free_hndls;
3400
3401                                 beiscsi_conn->login_in_progress = 1;
3402                                 beiscsi_conn->plogin_sgl_handle =
3403                                                         io_task->psgl_handle;
3404                         } else {
3405                                 io_task->psgl_handle =
3406                                                 beiscsi_conn->plogin_sgl_handle;
3407                         }
3408                 } else {
3409                         spin_lock(&phba->mgmt_sgl_lock);
3410                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3411                         spin_unlock(&phba->mgmt_sgl_lock);
3412                         if (!io_task->psgl_handle)
3413                                 goto free_hndls;
3414                 }
3415         }
3416         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3417                                  wrb_index << 16) | (unsigned int)
3418                                 (io_task->psgl_handle->sgl_index));
3419         if (ring_mode) {
3420                 phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
3421                                      phba->fw_config.iscsi_icd_start] =
3422                                      io_task->psgl_handle;
3423                 io_task->psgl_handle->task = task;
3424                 io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid  -
3425                                             phba->fw_config.iscsi_cid_start;
3426         } else
3427                 io_task->pwrb_handle->pio_handle = task;
3428
3429         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3430         return 0;
3431
3432 free_hndls:
3433         phwi_ctrlr = phba->phwi_ctrlr;
3434         pwrb_context = &phwi_ctrlr->wrb_context[
3435                         beiscsi_conn->beiscsi_conn_cid -
3436                         phba->fw_config.iscsi_cid_start];
3437         free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3438         io_task->pwrb_handle = NULL;
3439         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3440                       io_task->bhs_pa.u.a64.address);
3441         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3442         return -ENOMEM;
3443 }
3444
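     /*
      * beiscsi_cleanup_task - release what beiscsi_alloc_pdu() took:
      * the WRB handle, the BHS pool entry and the I/O or management SGL
      * handle (the login SGL handle is kept for reuse).
      */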
3445 static void beiscsi_cleanup_task(struct iscsi_task *task)
3446 {
3447         struct beiscsi_io_task *io_task = task->dd_data;
3448         struct iscsi_conn *conn = task->conn;
3449         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3450         struct beiscsi_hba *phba = beiscsi_conn->phba;
3451         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3452         struct hwi_wrb_context *pwrb_context;
3453         struct hwi_controller *phwi_ctrlr;
3454
3455         phwi_ctrlr = phba->phwi_ctrlr;
3456         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3457                         - phba->fw_config.iscsi_cid_start];
3458         if (io_task->pwrb_handle) {
3459                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3460                 io_task->pwrb_handle = NULL;
3461         }
3462
3463         if (io_task->cmd_bhs) {
3464                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3465                               io_task->bhs_pa.u.a64.address);
3466         }
3467
3468         if (task->sc) {
3469                 if (io_task->psgl_handle) {
3470                         spin_lock(&phba->io_sgl_lock);
3471                         free_io_sgl_handle(phba, io_task->psgl_handle);
3472                         spin_unlock(&phba->io_sgl_lock);
3473                         io_task->psgl_handle = NULL;
3474                 }
3475         } else {
3476                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3477                         return;
3478                 if (io_task->psgl_handle) {
3479                         spin_lock(&phba->mgmt_sgl_lock);
3480                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3481                         spin_unlock(&phba->mgmt_sgl_lock);
3482                         io_task->psgl_handle = NULL;
3483                 }
3484         }
3485 }
3486
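     /*
      * beiscsi_iotask - build the WRB for a SCSI read or write: set up
      * the data-out header for writes, program LUN, transfer length,
      * cmdsn, WRB and SGL indexes, write the scatterlist into the WRB
      * and ring the TXULP0 doorbell.
      */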
3487 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3488                           unsigned int num_sg, unsigned int xferlen,
3489                           unsigned int writedir)
3490 {
3491
3492         struct beiscsi_io_task *io_task = task->dd_data;
3493         struct iscsi_conn *conn = task->conn;
3494         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3495         struct beiscsi_hba *phba = beiscsi_conn->phba;
3496         struct iscsi_wrb *pwrb = NULL;
3497         unsigned int doorbell = 0;
3498
3499         pwrb = io_task->pwrb_handle->pwrb;
3500         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3501         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3502
3503         if (writedir) {
3504                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3505                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3506                               &io_task->cmd_bhs->iscsi_data_pdu,
3507                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3508                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3509                               &io_task->cmd_bhs->iscsi_data_pdu,
3510                               ISCSI_OPCODE_SCSI_DATA_OUT);
3511                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3512                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3513                 if (ring_mode)
3514                         io_task->psgl_handle->type = INI_WR_CMD;
3515                 else
3516                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3517                                       INI_WR_CMD);
3518                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3519         } else {
3520                 if (ring_mode)
3521                         io_task->psgl_handle->type = INI_RD_CMD;
3522                 else
3523                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3524                                       INI_RD_CMD);
3525                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3526         }
3527         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3528                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3529                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3530
3531         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3532                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3533                                   lun[0]));
3534         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3535         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3536                       io_task->pwrb_handle->wrb_index);
3537         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3538                       be32_to_cpu(task->cmdsn));
3539         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3540                       io_task->psgl_handle->sgl_index);
3541
3542         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3543
3544         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3545                       io_task->pwrb_handle->nxt_wrb_index);
3546         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3547
3548         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3549         if (!ring_mode)
3550                 doorbell |= (io_task->pwrb_handle->wrb_index &
3551                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3552         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3553
3554         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3555         return 0;
3556 }
3557
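     /*
      * beiscsi_mtask - build and post the WRB for a management task
      * (login, nop-out, text, TMF, logout).  For a TMF the task
      * referred to by the RTT has its ICD invalidated via
      * mgmt_invalidate_icds() before the WRB is posted through the
      * TXULP0 doorbell.
      */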
3558 static int beiscsi_mtask(struct iscsi_task *task)
3559 {
3560         struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
3561         struct iscsi_conn *conn = task->conn;
3562         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3563         struct beiscsi_hba *phba = beiscsi_conn->phba;
3564         struct iscsi_session *session;
3565         struct iscsi_wrb *pwrb = NULL;
3566         struct hwi_controller *phwi_ctrlr;
3567         struct hwi_wrb_context *pwrb_context;
3568         struct wrb_handle *pwrb_handle;
3569         unsigned int doorbell = 0;
3570         unsigned int i, cid;
3571         struct iscsi_task *aborted_task;
3572         unsigned int tag;
3573
3574         cid = beiscsi_conn->beiscsi_conn_cid;
3575         pwrb = io_task->pwrb_handle->pwrb;
3576         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3577                       be32_to_cpu(task->cmdsn));
3578         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3579                       io_task->pwrb_handle->wrb_index);
3580         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3581                       io_task->psgl_handle->sgl_index);
3582
3583         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3584         case ISCSI_OP_LOGIN:
3585                 if (ring_mode)
3586                         io_task->psgl_handle->type = TGT_DM_CMD;
3587                 else
3588                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3589                                       TGT_DM_CMD);
3590                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3591                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3592                 hwi_write_buffer(pwrb, task);
3593                 break;
3594         case ISCSI_OP_NOOP_OUT:
3595                 if (ring_mode)
3596                         io_task->psgl_handle->type = INI_RD_CMD;
3597                 else
3598                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3599                                       INI_RD_CMD);
3600                 if (task->hdr->ttt == ISCSI_RESERVED_TAG)
3601                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3602                 else
3603                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
3604                 hwi_write_buffer(pwrb, task);
3605                 break;
3606         case ISCSI_OP_TEXT:
3607                 if (ring_mode)
3608                         io_task->psgl_handle->type = INI_WR_CMD;
3609                 else
3610                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3611                                       INI_WR_CMD);
3612                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3613                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3614                 hwi_write_buffer(pwrb, task);
3615                 break;
3616         case ISCSI_OP_SCSI_TMFUNC:
3617                 session = conn->session;
3618                 i = ((struct iscsi_tm *)task->hdr)->rtt;
3619                 phwi_ctrlr = phba->phwi_ctrlr;
3620                 pwrb_context = &phwi_ctrlr->wrb_context[cid -
3621                                             phba->fw_config.iscsi_cid_start];
3622                 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3623                                                                 >> 16];
3624                 aborted_task = pwrb_handle->pio_handle;
3625                 if (!aborted_task)
3626                         return 0;
3627
3628                 aborted_io_task = aborted_task->dd_data;
3629                 if (!aborted_io_task->scsi_cmnd)
3630                         return 0;
3631
3632                 tag = mgmt_invalidate_icds(phba,
3633                                      aborted_io_task->psgl_handle->sgl_index,
3634                                      cid);
3635                 if (!tag) {
3636                         shost_printk(KERN_WARNING, phba->shost,
3637                                      "mgmt_invalidate_icds could not be"
3638                                      " submitted\n");
3639                 } else {
3640                         wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3641                                                  phba->ctrl.mcc_numtag[tag]);
3642                         free_mcc_tag(&phba->ctrl, tag);
3643                 }
3644                 if (ring_mode)
3645                         io_task->psgl_handle->type = INI_TMF_CMD;
3646                 else
3647                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3648                                       INI_TMF_CMD);
3649                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3650                 hwi_write_buffer(pwrb, task);
3651                 break;
3652         case ISCSI_OP_LOGOUT:
3653                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3654                 if (ring_mode)
3655                         io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
3656                 else
3657                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3658                                       HWH_TYPE_LOGOUT);
3659                 hwi_write_buffer(pwrb, task);
3660                 break;
3661
3662         default:
3663                 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
3664                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3665                 return -EINVAL;
3666         }
3667
3668         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3669                       task->data_count);
3670         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3671                       io_task->pwrb_handle->nxt_wrb_index);
3672         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3673
3674         doorbell |= cid & DB_WRB_POST_CID_MASK;
3675         if (!ring_mode)
3676                 doorbell |= (io_task->pwrb_handle->wrb_index &
3677                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3678         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3679         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3680         return 0;
3681 }
3682
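     /*
      * beiscsi_task_xmit - transmit entry point: management PDUs are
      * handed to beiscsi_mtask(); SCSI commands are DMA-mapped and
      * passed with their scatterlist and data direction to
      * beiscsi_iotask().
      */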
3683 static int beiscsi_task_xmit(struct iscsi_task *task)
3684 {
3685         struct iscsi_conn *conn = task->conn;
3686         struct beiscsi_io_task *io_task = task->dd_data;
3687         struct scsi_cmnd *sc = task->sc;
3688         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3689         struct scatterlist *sg;
3690         int num_sg;
3691         unsigned int  writedir = 0, xferlen = 0;
3692
3693         SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3694                  "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3695                  task, conn, beiscsi_conn);
3696         if (!sc)
3697                 return beiscsi_mtask(task);
3698
3699         io_task->scsi_cmnd = sc;
3700         num_sg = scsi_dma_map(sc);
3701         if (num_sg < 0) {
3702                 SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
3703                 return num_sg;
3704         }
3705         SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3706                   (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3707         xferlen = scsi_bufflen(sc);
3708         sg = scsi_sglist(sc);
3709         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3710                 writedir = 1;
3711                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3712                          task->imm_count);
3713         } else
3714                 writedir = 0;
3715         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3716 }
3717
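     /*
      * beiscsi_remove - PCI remove callback.
      *
      * Disables adapter interrupts, frees the MSI-X or INTx IRQs,
      * stops blk_iopoll on every event queue, destroys the work
      * queue and releases the port resources, mailbox DMA memory
      * and the SCSI host.
      */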
3718 static void beiscsi_remove(struct pci_dev *pcidev)
3719 {
3720         struct beiscsi_hba *phba = NULL;
3721         struct hwi_controller *phwi_ctrlr;
3722         struct hwi_context_memory *phwi_context;
3723         struct be_eq_obj *pbe_eq;
3724         unsigned int i, msix_vec;
3725
3726         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3727         if (!phba) {
3728                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
3729                 return;
3730         }
3731
3732         phwi_ctrlr = phba->phwi_ctrlr;
3733         phwi_context = phwi_ctrlr->phwi_ctxt;
3734         hwi_disable_intr(phba);
3735         if (phba->msix_enabled) {
3736                 for (i = 0; i <= phba->num_cpus; i++) {
3737                         msix_vec = phba->msix_entries[i].vector;
3738                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3739                 }
3740         } else
3741                 if (phba->pcidev->irq)
3742                         free_irq(phba->pcidev->irq, phba);
3743         pci_disable_msix(phba->pcidev);
3744         destroy_workqueue(phba->wq);
3745         if (blk_iopoll_enabled)
3746                 for (i = 0; i < phba->num_cpus; i++) {
3747                         pbe_eq = &phwi_context->be_eq[i];
3748                         blk_iopoll_disable(&pbe_eq->iopoll);
3749                 }
3750
3751         beiscsi_clean_port(phba);
3752         beiscsi_free_mem(phba);
3753         beiscsi_unmap_pci_function(phba);
3754         pci_free_consistent(phba->pcidev,
3755                             phba->ctrl.mbox_mem_alloced.size,
3756                             phba->ctrl.mbox_mem_alloced.va,
3757                             phba->ctrl.mbox_mem_alloced.dma);
3758         iscsi_host_remove(phba->shost);
3759         pci_dev_put(phba->pcidev);
3760         iscsi_host_free(phba->shost);
3761 }
3762
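     /*
      * beiscsi_msix_enable - request phba->num_cpus + 1 MSI-X vectors.
      * msix_enabled is set only if pci_enable_msix() succeeds;
      * otherwise the driver stays on legacy INTx.
      */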
3763 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3764 {
3765         int i, status;
3766
3767         for (i = 0; i <= phba->num_cpus; i++)
3768                 phba->msix_entries[i].entry = i;
3769
3770         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3771                                  (phba->num_cpus + 1));
3772         if (!status)
3773                 phba->msix_enabled = true;
3774
3775         return;
3776 }
3777
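     /*
      * beiscsi_dev_probe - PCI probe callback.
      *
      * Enables the PCI device, allocates the host, brings up the
      * controller (mailbox, firmware config, port, MCC tags, work
      * queue, blk_iopoll, IRQs) and finally enables interrupts.
      * Failures unwind through the labels at the end of the function
      * in reverse order of setup.
      */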
3778 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3779                                 const struct pci_device_id *id)
3780 {
3781         struct beiscsi_hba *phba = NULL;
3782         struct hwi_controller *phwi_ctrlr;
3783         struct hwi_context_memory *phwi_context;
3784         struct be_eq_obj *pbe_eq;
3785         int ret, msix_vec, num_cpus, i;
3786
3787         ret = beiscsi_enable_pci(pcidev);
3788         if (ret < 0) {
3789                 dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3790                         "Failed to enable pci device\n");
3791                 return ret;
3792         }
3793
3794         phba = beiscsi_hba_alloc(pcidev);
3795         if (!phba) {
3796                 dev_err(&pcidev->dev, "Failed in beiscsi_hba_alloc\n");
3797                 ret = -ENOMEM;
3798                 goto disable_pci;
3799         }
3800         SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
3801
3802         if (enable_msix)
3803                 num_cpus = find_num_cpus();
3804         else
3805                 num_cpus = 1;
3806         phba->num_cpus = num_cpus;
3807         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3808
3809         if (enable_msix)
3810                 beiscsi_msix_enable(phba);
3811         ret = be_ctrl_init(phba, pcidev);
3812         if (ret) {
3813                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3814                                 "Failed in be_ctrl_init\n");
3815                 goto hba_free;
3816         }
3817
3818         spin_lock_init(&phba->io_sgl_lock);
3819         spin_lock_init(&phba->mgmt_sgl_lock);
3820         spin_lock_init(&phba->isr_lock);
3821         ret = mgmt_get_fw_config(&phba->ctrl, phba);
3822         if (ret != 0) {
3823                 shost_printk(KERN_ERR, phba->shost,
3824                              "Error getting fw config\n");
3825                 goto free_port;
3826         }
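                 /*
                  * Size the SCSI host limits from the firmware-reported
                  * configuration: max_id from the iSCSI CID count and
                  * can_queue from the per-controller IO count filled in
                  * by beiscsi_get_params().
                  */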
3827         phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3828         beiscsi_get_params(phba);
3829         phba->shost->can_queue = phba->params.ios_per_ctrl;
3830         ret = beiscsi_init_port(phba);
3831         if (ret < 0) {
3832                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3833                              "Failed in beiscsi_init_port\n");
3834                 goto free_port;
3835         }
3836
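         /*
          * Initialise the MCC tag pool: tags 1..MAX_MCC_CMD, each with its
          * own wait queue and completion counter, used to wait for
          * management (MCC) command completions.
          */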
3837         for (i = 0; i < MAX_MCC_CMD ; i++) {
3838                 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3839                 phba->ctrl.mcc_tag[i] = i + 1;
3840                 phba->ctrl.mcc_numtag[i + 1] = 0;
3841                 phba->ctrl.mcc_tag_available++;
3842         }
3843
3844         phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3845
3846         snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3847                  phba->shost->host_no);
3848         phba->wq = create_workqueue(phba->wq_name);
3849         if (!phba->wq) {
3850                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3851                                 "Failed to allocate work queue\n");
3852                 goto free_twq;
3853         }
3854
3855         INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3856
3857         phwi_ctrlr = phba->phwi_ctrlr;
3858         phwi_context = phwi_ctrlr->phwi_ctxt;
3859         if (blk_iopoll_enabled) {
3860                 for (i = 0; i < phba->num_cpus; i++) {
3861                         pbe_eq = &phwi_context->be_eq[i];
3862                         blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3863                                         be_iopoll);
3864                         blk_iopoll_enable(&pbe_eq->iopoll);
3865                 }
3866         }
3867         ret = beiscsi_init_irqs(phba);
3868         if (ret < 0) {
3869                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3870                              "Failed in beiscsi_init_irqs\n");
3871                 goto free_blkenbld;
3872         }
3873         ret = hwi_enable_intr(phba);
3874         if (ret < 0) {
3875                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3876                              "Failed in hwi_enable_intr\n");
3877                 goto free_ctrlr;
3878         }
3879         SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3880         return 0;
3881
3882 free_ctrlr:
3883         if (phba->msix_enabled) {
3884                 for (i = 0; i <= phba->num_cpus; i++) {
3885                         msix_vec = phba->msix_entries[i].vector;
3886                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3887                 }
3888         } else
3889                 if (phba->pcidev->irq)
3890                         free_irq(phba->pcidev->irq, phba);
3891         pci_disable_msix(phba->pcidev);
3892 free_blkenbld:
3893         destroy_workqueue(phba->wq);
3894         if (blk_iopoll_enabled)
3895                 for (i = 0; i < phba->num_cpus; i++) {
3896                         pbe_eq = &phwi_context->be_eq[i];
3897                         blk_iopoll_disable(&pbe_eq->iopoll);
3898                 }
3899 free_twq:
3900         beiscsi_clean_port(phba);
3901         beiscsi_free_mem(phba);
3902 free_port:
3903         pci_free_consistent(phba->pcidev,
3904                             phba->ctrl.mbox_mem_alloced.size,
3905                             phba->ctrl.mbox_mem_alloced.va,
3906                            phba->ctrl.mbox_mem_alloced.dma);
3907         beiscsi_unmap_pci_function(phba);
3908 hba_free:
3909         iscsi_host_remove(phba->shost);
3910         pci_dev_put(phba->pcidev);
3911         iscsi_host_free(phba->shost);
3912 disable_pci:
3913         pci_disable_device(pcidev);
3914         return ret;
3915 }
3916
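     /*
      * iSCSI transport template registered with the iSCSI transport class;
      * it mixes be2iscsi-specific callbacks with libiscsi helpers and
      * advertises a hardware-offloaded data path (CAP_DATA_PATH_OFFLOAD).
      */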
3917 struct iscsi_transport beiscsi_iscsi_transport = {
3918         .owner = THIS_MODULE,
3919         .name = DRV_NAME,
3920         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
3921                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3922         .param_mask = ISCSI_MAX_RECV_DLENGTH |
3923                 ISCSI_MAX_XMIT_DLENGTH |
3924                 ISCSI_HDRDGST_EN |
3925                 ISCSI_DATADGST_EN |
3926                 ISCSI_INITIAL_R2T_EN |
3927                 ISCSI_MAX_R2T |
3928                 ISCSI_IMM_DATA_EN |
3929                 ISCSI_FIRST_BURST |
3930                 ISCSI_MAX_BURST |
3931                 ISCSI_PDU_INORDER_EN |
3932                 ISCSI_DATASEQ_INORDER_EN |
3933                 ISCSI_ERL |
3934                 ISCSI_CONN_PORT |
3935                 ISCSI_CONN_ADDRESS |
3936                 ISCSI_EXP_STATSN |
3937                 ISCSI_PERSISTENT_PORT |
3938                 ISCSI_PERSISTENT_ADDRESS |
3939                 ISCSI_TARGET_NAME | ISCSI_TPGT |
3940                 ISCSI_USERNAME | ISCSI_PASSWORD |
3941                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3942                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3943                 ISCSI_LU_RESET_TMO |
3944                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3945                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3946         .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3947                                 ISCSI_HOST_INITIATOR_NAME,
3948         .create_session = beiscsi_session_create,
3949         .destroy_session = beiscsi_session_destroy,
3950         .create_conn = beiscsi_conn_create,
3951         .bind_conn = beiscsi_conn_bind,
3952         .destroy_conn = iscsi_conn_teardown,
3953         .set_param = beiscsi_set_param,
3954         .get_conn_param = beiscsi_conn_get_param,
3955         .get_session_param = iscsi_session_get_param,
3956         .get_host_param = beiscsi_get_host_param,
3957         .start_conn = beiscsi_conn_start,
3958         .stop_conn = beiscsi_conn_stop,
3959         .send_pdu = iscsi_conn_send_pdu,
3960         .xmit_task = beiscsi_task_xmit,
3961         .cleanup_task = beiscsi_cleanup_task,
3962         .alloc_pdu = beiscsi_alloc_pdu,
3963         .parse_pdu_itt = beiscsi_parse_pdu,
3964         .get_stats = beiscsi_conn_get_stats,
3965         .ep_connect = beiscsi_ep_connect,
3966         .ep_poll = beiscsi_ep_poll,
3967         .ep_disconnect = beiscsi_ep_disconnect,
3968         .session_recovery_timedout = iscsi_session_recovery_timedout,
3969 };
3970
3971 static struct pci_driver beiscsi_pci_driver = {
3972         .name = DRV_NAME,
3973         .probe = beiscsi_dev_probe,
3974         .remove = beiscsi_remove,
3975         .id_table = beiscsi_pci_id_table
3976 };
3977
3978
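     /*
      * beiscsi_module_init - register the iSCSI transport first, then the
      * PCI driver; unwind the transport registration if the PCI
      * registration fails.
      */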
3979 static int __init beiscsi_module_init(void)
3980 {
3981         int ret;
3982
3983         beiscsi_scsi_transport =
3984                         iscsi_register_transport(&beiscsi_iscsi_transport);
3985         if (!beiscsi_scsi_transport) {
3986                 SE_DEBUG(DBG_LVL_1,
3987                          "beiscsi_module_init - Unable to register beiscsi "
3988                          "transport.\n");
3989                 return -ENOMEM;
3990         }
3991         SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3992                  &beiscsi_iscsi_transport);
3993
3994         ret = pci_register_driver(&beiscsi_pci_driver);
3995         if (ret) {
3996                 SE_DEBUG(DBG_LVL_1,
3997                          "beiscsi_module_init - Unable to register "
3998                          "beiscsi pci driver.\n");
3999                 goto unregister_iscsi_transport;
4000         }
4001         ring_mode = 0;
4002         return 0;
4003
4004 unregister_iscsi_transport:
4005         iscsi_unregister_transport(&beiscsi_iscsi_transport);
4006         return ret;
4007 }
4008
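     /* Reverse of beiscsi_module_init: unregister the PCI driver, then the transport. */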
4009 static void __exit beiscsi_module_exit(void)
4010 {
4011         pci_unregister_driver(&beiscsi_pci_driver);
4012         iscsi_unregister_transport(&beiscsi_iscsi_transport);
4013 }
4014
4015 module_init(beiscsi_module_init);
4016 module_exit(beiscsi_module_exit);