[SCSI] be2iscsi: Adding msix and mcc_rings V3
drivers/scsi/be2iscsi/be_main.c
1 /**
2  * Copyright (C) 2005 - 2009 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/blkdev.h>
24 #include <linux/pci.h>
25 #include <linux/string.h>
26 #include <linux/kernel.h>
27 #include <linux/semaphore.h>
28
29 #include <scsi/libiscsi.h>
30 #include <scsi/scsi_transport_iscsi.h>
31 #include <scsi/scsi_transport.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi.h>
36 #include "be_main.h"
37 #include "be_iscsi.h"
38 #include "be_mgmt.h"
39
40 static unsigned int be_iopoll_budget = 10;
41 static unsigned int be_max_phys_size = 64;
42 static unsigned int enable_msix = 1;
43
44 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
45 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
46 MODULE_AUTHOR("ServerEngines Corporation");
47 MODULE_LICENSE("GPL");
48 module_param(be_iopoll_budget, int, 0);
49 module_param(enable_msix, int, 0);
50 module_param(be_max_phys_size, uint, S_IRUGO);
51 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
52                                    " contiguous memory that can be allocated."
53                                    " Range is 16 - 128");
54
55 static int beiscsi_slave_configure(struct scsi_device *sdev)
56 {
57         blk_queue_max_segment_size(sdev->request_queue, 65536);
58         return 0;
59 }
60
61 /*------------------- PCI Driver operations and data ----------------- */
62 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
63         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
64         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
65         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
66         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
67         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
68         { 0 }
69 };
70 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
71
72 static struct scsi_host_template beiscsi_sht = {
73         .module = THIS_MODULE,
74         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
75         .proc_name = DRV_NAME,
76         .queuecommand = iscsi_queuecommand,
77         .eh_abort_handler = iscsi_eh_abort,
78         .change_queue_depth = iscsi_change_queue_depth,
79         .slave_configure = beiscsi_slave_configure,
80         .target_alloc = iscsi_target_alloc,
81         .eh_device_reset_handler = iscsi_eh_device_reset,
82         .eh_target_reset_handler = iscsi_eh_target_reset,
83         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
84         .can_queue = BE2_IO_DEPTH,
85         .this_id = -1,
86         .max_sectors = BEISCSI_MAX_SECTORS,
87         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
88         .use_clustering = ENABLE_CLUSTERING,
89 };
90
91 static struct scsi_transport_template *beiscsi_scsi_transport;
92
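/**
 * beiscsi_hba_alloc - Allocate a Scsi_Host and the driver private hba
 * @pcidev: PCI device the host is attached to
 *
 * Returns the zeroed beiscsi_hba on success after registering the host,
 * or NULL if allocation or iscsi_host_add() fails.
 */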
93 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
94 {
95         struct beiscsi_hba *phba;
96         struct Scsi_Host *shost;
97
98         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
99         if (!shost) {
100                 dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
101                         "iscsi_host_alloc failed\n");
102                 return NULL;
103         }
104         shost->dma_boundary = pcidev->dma_mask;
105         shost->max_id = BE2_MAX_SESSIONS;
106         shost->max_channel = 0;
107         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
108         shost->max_lun = BEISCSI_NUM_MAX_LUN;
109         shost->transportt = beiscsi_scsi_transport;
110         phba = iscsi_host_priv(shost);
111         memset(phba, 0, sizeof(*phba));
112         phba->shost = shost;
113         phba->pcidev = pci_dev_get(pcidev);
114
115         if (iscsi_host_add(shost, &phba->pcidev->dev))
116                 goto free_devices;
117         return phba;
118
119 free_devices:
120         pci_dev_put(phba->pcidev);
121         iscsi_host_free(phba->shost);
122         return NULL;
123 }
124
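/**
 * beiscsi_unmap_pci_function - Release the CSR, doorbell and PCI config mappings
 * @phba: The hba whose BAR mappings are torn down
 */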
125 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
126 {
127         if (phba->csr_va) {
128                 iounmap(phba->csr_va);
129                 phba->csr_va = NULL;
130         }
131         if (phba->db_va) {
132                 iounmap(phba->db_va);
133                 phba->db_va = NULL;
134         }
135         if (phba->pci_va) {
136                 iounmap(phba->pci_va);
137                 phba->pci_va = NULL;
138         }
139 }
140
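/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and PCI config BARs
 * @phba: The hba being set up
 * @pcidev: Underlying PCI device
 *
 * Returns 0 on success or -ENOMEM if any mapping fails; partial
 * mappings are undone on the error path.
 */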
141 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
142                                 struct pci_dev *pcidev)
143 {
144         u8 __iomem *addr;
145
146         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
147                                pci_resource_len(pcidev, 2));
148         if (addr == NULL)
149                 return -ENOMEM;
150         phba->ctrl.csr = addr;
151         phba->csr_va = addr;
152         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
153
154         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
155         if (addr == NULL)
156                 goto pci_map_err;
157         phba->ctrl.db = addr;
158         phba->db_va = addr;
159         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
160
161         addr = ioremap_nocache(pci_resource_start(pcidev, 1),
162                                pci_resource_len(pcidev, 1));
163         if (addr == NULL)
164                 goto pci_map_err;
165         phba->ctrl.pcicfg = addr;
166         phba->pci_va = addr;
167         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
168         return 0;
169
170 pci_map_err:
171         beiscsi_unmap_pci_function(phba);
172         return -ENOMEM;
173 }
174
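/**
 * beiscsi_enable_pci - Enable the PCI device and set the DMA mask
 * @pcidev: PCI device to enable
 *
 * Enables bus mastering and tries a 64-bit consistent DMA mask,
 * falling back to 32-bit. Returns 0 on success.
 */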
175 static int beiscsi_enable_pci(struct pci_dev *pcidev)
176 {
177         int ret;
178
179         ret = pci_enable_device(pcidev);
180         if (ret) {
181                 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
182                         "failed\n");
183                 return ret;
184         }
185
186         pci_set_master(pcidev);
187         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
188                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
189                 if (ret) {
190                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
191                         pci_disable_device(pcidev);
192                         return ret;
193                 }
194         }
195         return 0;
196 }
197
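/**
 * be_ctrl_init - Map the BARs and set up the FW mailbox
 * @phba: The hba being initialized
 * @pdev: Underlying PCI device
 *
 * Allocates DMA memory for the MCC mailbox, aligns it to 16 bytes and
 * initializes the mailbox and MCC locks. Returns 0 on success.
 */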
198 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
199 {
200         struct be_ctrl_info *ctrl = &phba->ctrl;
201         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
202         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
203         int status = 0;
204
205         ctrl->pdev = pdev;
206         status = beiscsi_map_pci_bars(phba, pdev);
207         if (status)
208                 return status;
209         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
210         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
211                                                   mbox_mem_alloc->size,
212                                                   &mbox_mem_alloc->dma);
213         if (!mbox_mem_alloc->va) {
214                 beiscsi_unmap_pci_function(phba);
215                 status = -ENOMEM;
216                 return status;
217         }
218
219         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
220         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
221         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
222         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
223         spin_lock_init(&ctrl->mbox_lock);
224         spin_lock_init(&phba->ctrl.mcc_lock);
225         spin_lock_init(&phba->ctrl.mcc_cq_lock);
226
227         return status;
228 }
229
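/**
 * beiscsi_get_params - Derive the per-controller sizing parameters
 * @phba: The hba to fill in
 *
 * Sets the I/O depth, session count, ICD/SGE counts, default PDU sizes
 * and the EQ/CQ depths used when the rings are created.
 */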
230 static void beiscsi_get_params(struct beiscsi_hba *phba)
231 {
232         phba->params.ios_per_ctrl = BE2_IO_DEPTH;
233         phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS;
234         phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS;
235         phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2;
236         phba->params.num_sge_per_io = BE2_SGE;
237         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
238         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
239         phba->params.eq_timer = 64;
240         phba->params.num_eq_entries =
241             (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
242                                                                 512) + 1) * 512;
243         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
244                                 ? 1024 : phba->params.num_eq_entries;
245         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
246                  phba->params.num_eq_entries);
247         phba->params.num_cq_entries =
248             (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
249                                                                 512) + 1) * 512;
250         SE_DEBUG(DBG_LVL_8,
251                 "phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d "
252                 "BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d \n",
253                 phba->params.num_cq_entries, BE2_CMDS_PER_CXN,
254                 BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS);
255         phba->params.wrbs_per_cxn = 256;
256 }
257
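/**
 * hwi_ring_eq_db - Ring the event queue doorbell
 * @phba: The hba that owns the EQ
 * @id: EQ ring id
 * @clr_interrupt: Clear the interrupt if set
 * @num_processed: Number of EQEs consumed since the last doorbell
 * @rearm: Re-arm the EQ for further events
 * @event: Mark the popped entries as events
 */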
258 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
259                            unsigned int id, unsigned int clr_interrupt,
260                            unsigned int num_processed,
261                            unsigned char rearm, unsigned char event)
262 {
263         u32 val = 0;
264         val |= id & DB_EQ_RING_ID_MASK;
265         if (rearm)
266                 val |= 1 << DB_EQ_REARM_SHIFT;
267         if (clr_interrupt)
268                 val |= 1 << DB_EQ_CLR_SHIFT;
269         if (event)
270                 val |= 1 << DB_EQ_EVNT_SHIFT;
271         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
272         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
273 }
274
275 /**
276  * be_isr_mcc - The isr routine for the dedicated MCC event queue (MSI-X).
277  * @irq: Not used
278  * @dev_id: Pointer to host adapter structure
279  */
280 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
281 {
282         struct beiscsi_hba *phba;
283         struct be_eq_entry *eqe = NULL;
284         struct be_queue_info *eq;
285         struct be_queue_info *mcc;
286         unsigned int num_eq_processed;
287         struct be_eq_obj *pbe_eq;
288         unsigned long flags;
289
290         pbe_eq = dev_id;
291         eq = &pbe_eq->q;
292         phba =  pbe_eq->phba;
293         mcc = &phba->ctrl.mcc_obj.cq;
294         eqe = queue_tail_node(eq);
295         if (!eqe)
296                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
297
298         num_eq_processed = 0;
299
300         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
301                                 & EQE_VALID_MASK) {
302                 if (((eqe->dw[offsetof(struct amap_eq_entry,
303                      resource_id) / 32] &
304                      EQE_RESID_MASK) >> 16) == mcc->id) {
305                         spin_lock_irqsave(&phba->isr_lock, flags);
306                         phba->todo_mcc_cq = 1;
307                         spin_unlock_irqrestore(&phba->isr_lock, flags);
308                 }
309                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
310                 queue_tail_inc(eq);
311                 eqe = queue_tail_node(eq);
312                 num_eq_processed++;
313         }
314         if (phba->todo_mcc_cq)
315                 queue_work(phba->wq, &phba->work_cqs);
316         if (num_eq_processed)
317                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
318
319         return IRQ_HANDLED;
320 }
321
322 /**
323  * be_isr_msix - The isr routine for the per-CPU I/O event queues (MSI-X).
324  * @irq: Not used
325  * @dev_id: Pointer to host adapter structure
326  */
327 static irqreturn_t be_isr_msix(int irq, void *dev_id)
328 {
329         struct beiscsi_hba *phba;
330         struct be_eq_entry *eqe = NULL;
331         struct be_queue_info *eq;
332         struct be_queue_info *cq;
333         unsigned int num_eq_processed;
334         struct be_eq_obj *pbe_eq;
335         unsigned long flags;
336
337         pbe_eq = dev_id;
338         eq = &pbe_eq->q;
339         cq = pbe_eq->cq;
340         eqe = queue_tail_node(eq);
341         if (!eqe)
342                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
343
344         phba = pbe_eq->phba;
345         num_eq_processed = 0;
346         if (blk_iopoll_enabled) {
347                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
348                                         & EQE_VALID_MASK) {
349                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
350                                 blk_iopoll_sched(&pbe_eq->iopoll);
351
352                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
353                         queue_tail_inc(eq);
354                         eqe = queue_tail_node(eq);
355                         num_eq_processed++;
356                 }
357                 if (num_eq_processed)
358                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
359
360                 return IRQ_HANDLED;
361         } else {
362                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
363                                                 & EQE_VALID_MASK) {
364                         spin_lock_irqsave(&phba->isr_lock, flags);
365                         phba->todo_cq = 1;
366                         spin_unlock_irqrestore(&phba->isr_lock, flags);
367                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
368                         queue_tail_inc(eq);
369                         eqe = queue_tail_node(eq);
370                         num_eq_processed++;
371                 }
372                 if (phba->todo_cq)
373                         queue_work(phba->wq, &phba->work_cqs);
374
375                 if (num_eq_processed)
376                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
377
378                 return IRQ_HANDLED;
379         }
380 }
381
382 /**
383  * be_isr - The legacy INTx isr routine, handling both I/O and MCC events.
384  * @irq: Not used
385  * @dev_id: Pointer to host adapter structure
386  */
387 static irqreturn_t be_isr(int irq, void *dev_id)
388 {
389         struct beiscsi_hba *phba;
390         struct hwi_controller *phwi_ctrlr;
391         struct hwi_context_memory *phwi_context;
392         struct be_eq_entry *eqe = NULL;
393         struct be_queue_info *eq;
394         struct be_queue_info *cq;
395         struct be_queue_info *mcc;
396         unsigned long flags, index;
397         unsigned int num_mcceq_processed, num_ioeq_processed;
398         struct be_ctrl_info *ctrl;
399         struct be_eq_obj *pbe_eq;
400         int isr;
401
402         phba = dev_id;
403         ctrl = &phba->ctrl;
404         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
405                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
406         if (!isr)
407                 return IRQ_NONE;
408
409         phwi_ctrlr = phba->phwi_ctrlr;
410         phwi_context = phwi_ctrlr->phwi_ctxt;
411         pbe_eq = &phwi_context->be_eq[0];
412
413         eq = &phwi_context->be_eq[0].q;
414         mcc = &phba->ctrl.mcc_obj.cq;
415         index = 0;
416         eqe = queue_tail_node(eq);
417         if (!eqe)
418                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
419
420         num_ioeq_processed = 0;
421         num_mcceq_processed = 0;
422         if (blk_iopoll_enabled) {
423                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
424                                         & EQE_VALID_MASK) {
425                         if (((eqe->dw[offsetof(struct amap_eq_entry,
426                              resource_id) / 32] &
427                              EQE_RESID_MASK) >> 16) == mcc->id) {
428                                 spin_lock_irqsave(&phba->isr_lock, flags);
429                                 phba->todo_mcc_cq = 1;
430                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
431                                 num_mcceq_processed++;
432                         } else {
433                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
434                                         blk_iopoll_sched(&pbe_eq->iopoll);
435                                 num_ioeq_processed++;
436                         }
437                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
438                         queue_tail_inc(eq);
439                         eqe = queue_tail_node(eq);
440                 }
441                 if (num_ioeq_processed || num_mcceq_processed) {
442                         if (phba->todo_mcc_cq)
443                                 queue_work(phba->wq, &phba->work_cqs);
444
445                         if ((num_mcceq_processed) && (!num_ioeq_processed))
446                                 hwi_ring_eq_db(phba, eq->id, 0,
447                                                (num_ioeq_processed +
448                                                 num_mcceq_processed), 1, 1);
449                         else
450                                 hwi_ring_eq_db(phba, eq->id, 0,
451                                                (num_ioeq_processed +
452                                                 num_mcceq_processed), 0, 1);
453
454                         return IRQ_HANDLED;
455                 } else
456                         return IRQ_NONE;
457         } else {
458                 cq = &phwi_context->be_cq[0];
459                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
460                                                 & EQE_VALID_MASK) {
461
462                         if (((eqe->dw[offsetof(struct amap_eq_entry,
463                              resource_id) / 32] &
464                              EQE_RESID_MASK) >> 16) != cq->id) {
465                                 spin_lock_irqsave(&phba->isr_lock, flags);
466                                 phba->todo_mcc_cq = 1;
467                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
468                         } else {
469                                 spin_lock_irqsave(&phba->isr_lock, flags);
470                                 phba->todo_cq = 1;
471                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
472                         }
473                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
474                         queue_tail_inc(eq);
475                         eqe = queue_tail_node(eq);
476                         num_ioeq_processed++;
477                 }
478                 if (phba->todo_cq || phba->todo_mcc_cq)
479                         queue_work(phba->wq, &phba->work_cqs);
480
481                 if (num_ioeq_processed) {
482                         hwi_ring_eq_db(phba, eq->id, 0,
483                                        num_ioeq_processed, 1, 1);
484                         return IRQ_HANDLED;
485                 } else
486                         return IRQ_NONE;
487         }
488 }
489
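/**
 * beiscsi_init_irqs - Register the interrupt handlers
 * @phba: The hba being initialized
 *
 * With MSI-X, one vector per CPU is requested for I/O event queues plus
 * a dedicated vector for MCC; otherwise a single shared INTx handler
 * (be_isr) is registered.
 */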
490 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
491 {
492         struct pci_dev *pcidev = phba->pcidev;
493         struct hwi_controller *phwi_ctrlr;
494         struct hwi_context_memory *phwi_context;
495         int ret, msix_vec, i = 0;
496         char desc[32];
497
498         phwi_ctrlr = phba->phwi_ctrlr;
499         phwi_context = phwi_ctrlr->phwi_ctxt;
500
501         if (phba->msix_enabled) {
502                 for (i = 0; i < phba->num_cpus; i++) {
503                         sprintf(desc, "beiscsi_msix_%04x", i);
504                         msix_vec = phba->msix_entries[i].vector;
505                         ret = request_irq(msix_vec, be_isr_msix, 0, desc,
506                                           &phwi_context->be_eq[i]);
507                 }
508                 msix_vec = phba->msix_entries[i].vector;
509                 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
510                                   &phwi_context->be_eq[i]);
511         } else {
512                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
513                                   "beiscsi", phba);
514                 if (ret) {
515                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs - "
516                                      "Failed to register irq\n");
517                         return ret;
518                 }
519         }
520         return 0;
521 }
522
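/**
 * hwi_ring_cq_db - Ring the completion queue doorbell
 * @phba: The hba that owns the CQ
 * @id: CQ ring id
 * @num_processed: Number of CQEs consumed since the last doorbell
 * @rearm: Re-arm the CQ for further completions
 * @event: Unused for the CQ doorbell format
 */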
523 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
524                            unsigned int id, unsigned int num_processed,
525                            unsigned char rearm, unsigned char event)
526 {
527         u32 val = 0;
528         val |= id & DB_CQ_RING_ID_MASK;
529         if (rearm)
530                 val |= 1 << DB_CQ_REARM_SHIFT;
531         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
532         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
533 }
534
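/**
 * beiscsi_process_async_pdu - Hand an unsolicited PDU to libiscsi
 * @beiscsi_conn: Connection the PDU arrived on
 * @phba: The hba that received it
 * @cid: Connection id
 * @ppdu: PDU header
 * @pdu_len: Length of the PDU header
 * @pbuffer: Data segment, if any
 * @buf_len: Length of the data segment
 *
 * Returns 0 once the PDU is completed, 1 for unrecognized opcodes.
 */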
535 static unsigned int
536 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
537                           struct beiscsi_hba *phba,
538                           unsigned short cid,
539                           struct pdu_base *ppdu,
540                           unsigned long pdu_len,
541                           void *pbuffer, unsigned long buf_len)
542 {
543         struct iscsi_conn *conn = beiscsi_conn->conn;
544         struct iscsi_session *session = conn->session;
545         struct iscsi_task *task;
546         struct beiscsi_io_task *io_task;
547         struct iscsi_hdr *login_hdr;
548
549         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
550                                                 PDUBASE_OPCODE_MASK) {
551         case ISCSI_OP_NOOP_IN:
552                 pbuffer = NULL;
553                 buf_len = 0;
554                 break;
555         case ISCSI_OP_ASYNC_EVENT:
556                 break;
557         case ISCSI_OP_REJECT:
558                 WARN_ON(!pbuffer);
559                 WARN_ON(!(buf_len == 48));
560                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
561                 break;
562         case ISCSI_OP_LOGIN_RSP:
563                 task = conn->login_task;
564                 io_task = task->dd_data;
565                 login_hdr = (struct iscsi_hdr *)ppdu;
566                 login_hdr->itt = io_task->libiscsi_itt;
567                 break;
568         default:
569                 shost_printk(KERN_WARNING, phba->shost,
570                              "Unrecognized opcode 0x%x in async msg \n",
571                              (ppdu->
572                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
573                                                 & PDUBASE_OPCODE_MASK));
574                 return 1;
575         }
576
577         spin_lock_bh(&session->lock);
578         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
579         spin_unlock_bh(&session->lock);
580         return 0;
581 }
582
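/**
 * alloc_io_sgl_handle - Take an SGL handle from the I/O pool
 * @phba: The hba that owns the pool
 *
 * Returns NULL when no handle is available.
 */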
583 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
584 {
585         struct sgl_handle *psgl_handle;
586
587         if (phba->io_sgl_hndl_avbl) {
588                 SE_DEBUG(DBG_LVL_8,
589                          "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
590                          phba->io_sgl_alloc_index);
591                 psgl_handle = phba->io_sgl_hndl_base[phba->
592                                                 io_sgl_alloc_index];
593                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
594                 phba->io_sgl_hndl_avbl--;
595                 if (phba->io_sgl_alloc_index == (phba->params.
596                                                  ios_per_ctrl - 1))
597                         phba->io_sgl_alloc_index = 0;
598                 else
599                         phba->io_sgl_alloc_index++;
600         } else
601                 psgl_handle = NULL;
602         return psgl_handle;
603 }
604
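/**
 * free_io_sgl_handle - Return an SGL handle to the I/O pool
 * @phba: The hba that owns the pool
 * @psgl_handle: The handle being freed
 */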
605 static void
606 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
607 {
608         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
609                  phba->io_sgl_free_index);
610         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
611                 /*
612                  * this can happen if clean_task is called on a task that
613                  * failed in xmit_task or alloc_pdu.
614                  */
615                  SE_DEBUG(DBG_LVL_8,
616                          "Double Free in IO SGL io_sgl_free_index=%d, "
617                          "value there=%p\n", phba->io_sgl_free_index,
618                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
619                 return;
620         }
621         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
622         phba->io_sgl_hndl_avbl++;
623         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
624                 phba->io_sgl_free_index = 0;
625         else
626                 phba->io_sgl_free_index++;
627 }
628
629 /**
630  * alloc_wrb_handle - To allocate a wrb handle
631  * @phba: The hba pointer
632  * @cid: The cid to use for allocation
633  * @index: index allocation and wrb index
634  *
635  * This happens under session_lock until submission to chip
636  */
637 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
638                                     int index)
639 {
640         struct hwi_wrb_context *pwrb_context;
641         struct hwi_controller *phwi_ctrlr;
642         struct wrb_handle *pwrb_handle;
643
644         phwi_ctrlr = phba->phwi_ctrlr;
645         pwrb_context = &phwi_ctrlr->wrb_context[cid];
646         if (pwrb_context->wrb_handles_available) {
647                 pwrb_handle = pwrb_context->pwrb_handle_base[
648                                             pwrb_context->alloc_index];
649                 pwrb_context->wrb_handles_available--;
650                 pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index;
651                 if (pwrb_context->alloc_index ==
652                                                 (phba->params.wrbs_per_cxn - 1))
653                         pwrb_context->alloc_index = 0;
654                 else
655                         pwrb_context->alloc_index++;
656         } else
657                 pwrb_handle = NULL;
658         return pwrb_handle;
659 }
660
661 /**
662  * free_wrb_handle - To free the wrb handle back to pool
663  * @phba: The hba pointer
664  * @pwrb_context: The context to free from
665  * @pwrb_handle: The wrb_handle to free
666  *
667  * This happens under session_lock until submission to chip
668  */
669 static void
670 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
671                 struct wrb_handle *pwrb_handle)
672 {
673
674         pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
675         pwrb_context->wrb_handles_available++;
676         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
677                 pwrb_context->free_index = 0;
678         else
679                 pwrb_context->free_index++;
680
681         SE_DEBUG(DBG_LVL_8,
682                  "FREE WRB: pwrb_handle=%p free_index=0x%x "
683                  "wrb_handles_available=%d\n",
684                  pwrb_handle, pwrb_context->free_index,
685                  pwrb_context->wrb_handles_available);
686 }
687
688 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
689 {
690         struct sgl_handle *psgl_handle;
691
692         if (phba->eh_sgl_hndl_avbl) {
693                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
694                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
695                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
696                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
697                 phba->eh_sgl_hndl_avbl--;
698                 if (phba->eh_sgl_alloc_index ==
699                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
700                      1))
701                         phba->eh_sgl_alloc_index = 0;
702                 else
703                         phba->eh_sgl_alloc_index++;
704         } else
705                 psgl_handle = NULL;
706         return psgl_handle;
707 }
708
709 void
710 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
711 {
712
713         SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
714                              phba->eh_sgl_free_index);
715         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
716                 /*
717                  * this can happen if clean_task is called on a task that
718                  * failed in xmit_task or alloc_pdu.
719                  */
720                 SE_DEBUG(DBG_LVL_8,
721                          "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
722                          phba->eh_sgl_free_index);
723                 return;
724         }
725         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
726         phba->eh_sgl_hndl_avbl++;
727         if (phba->eh_sgl_free_index ==
728             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
729                 phba->eh_sgl_free_index = 0;
730         else
731                 phba->eh_sgl_free_index++;
732 }
733
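/**
 * be_complete_io - Complete a SCSI command from a solicited CQE
 * @beiscsi_conn: Connection the command ran on
 * @task: The iscsi_task being completed
 * @psol: Solicited completion entry
 *
 * Extracts status, residual count and sense data from the CQE, then
 * finishes the command through iscsi_complete_scsi_task().
 */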
734 static void
735 be_complete_io(struct beiscsi_conn *beiscsi_conn,
736                struct iscsi_task *task, struct sol_cqe *psol)
737 {
738         struct beiscsi_io_task *io_task = task->dd_data;
739         struct be_status_bhs *sts_bhs =
740                                 (struct be_status_bhs *)io_task->cmd_bhs;
741         struct iscsi_conn *conn = beiscsi_conn->conn;
742         unsigned int sense_len;
743         unsigned char *sense;
744         u32 resid = 0, exp_cmdsn, max_cmdsn;
745         u8 rsp, status, flags;
746
747         exp_cmdsn = (psol->
748                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
749                         & SOL_EXP_CMD_SN_MASK);
750         max_cmdsn = ((psol->
751                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
752                         & SOL_EXP_CMD_SN_MASK) +
753                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
754                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
755         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
756                                                 & SOL_RESP_MASK) >> 16);
757         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
758                                                 & SOL_STS_MASK) >> 8);
759         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
760                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
761
762         task->sc->result = (DID_OK << 16) | status;
763         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
764                 task->sc->result = DID_ERROR << 16;
765                 goto unmap;
766         }
767
768         /* bidi not initially supported */
769         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
770                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
771                                 32] & SOL_RES_CNT_MASK);
772
773                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
774                         task->sc->result = DID_ERROR << 16;
775
776                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
777                         scsi_set_resid(task->sc, resid);
778                         if (!status && (scsi_bufflen(task->sc) - resid <
779                             task->sc->underflow))
780                                 task->sc->result = DID_ERROR << 16;
781                 }
782         }
783
784         if (status == SAM_STAT_CHECK_CONDITION) {
785                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
786                 sense = sts_bhs->sense_info + sizeof(unsigned short);
787         sense_len = be16_to_cpu(*slen);
788                 memcpy(task->sc->sense_buffer, sense,
789                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
790         }
791         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
792                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
793                                                         & SOL_RES_CNT_MASK)
794                          conn->rxdata_octets += (psol->
795                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
796                              & SOL_RES_CNT_MASK);
797         }
798 unmap:
799         scsi_dma_unmap(io_task->scsi_cmnd);
800         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
801 }
802
803 static void
804 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
805                    struct iscsi_task *task, struct sol_cqe *psol)
806 {
807         struct iscsi_logout_rsp *hdr;
808         struct beiscsi_io_task *io_task = task->dd_data;
809         struct iscsi_conn *conn = beiscsi_conn->conn;
810
811         hdr = (struct iscsi_logout_rsp *)task->hdr;
812         hdr->t2wait = 5;
813         hdr->t2retain = 0;
814         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
815                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
816         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
817                                         32] & SOL_RESP_MASK);
818         hdr->exp_cmdsn = cpu_to_be32(psol->
819                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
820                                         & SOL_EXP_CMD_SN_MASK);
821         hdr->max_cmdsn = be32_to_cpu((psol->
822                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
823                                         & SOL_EXP_CMD_SN_MASK) +
824                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
825                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
826         hdr->hlength = 0;
827         hdr->itt = io_task->libiscsi_itt;
828         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
829 }
830
831 static void
832 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
833                 struct iscsi_task *task, struct sol_cqe *psol)
834 {
835         struct iscsi_tm_rsp *hdr;
836         struct iscsi_conn *conn = beiscsi_conn->conn;
837         struct beiscsi_io_task *io_task = task->dd_data;
838
839         hdr = (struct iscsi_tm_rsp *)task->hdr;
840         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
841                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
842         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
843                                         32] & SOL_RESP_MASK);
844         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
845                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
846         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
847                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
848                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
849                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
850         hdr->itt = io_task->libiscsi_itt;
851         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
852 }
853
854 static void
855 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
856                        struct beiscsi_hba *phba, struct sol_cqe *psol)
857 {
858         struct hwi_wrb_context *pwrb_context;
859         struct wrb_handle *pwrb_handle = NULL;
860         struct hwi_controller *phwi_ctrlr;
861         struct iscsi_task *task;
862         struct beiscsi_io_task *io_task;
863         struct iscsi_conn *conn = beiscsi_conn->conn;
864         struct iscsi_session *session = conn->session;
865
866         phwi_ctrlr = phba->phwi_ctrlr;
867         pwrb_context = &phwi_ctrlr->wrb_context[((psol->
868                         dw[offsetof(struct amap_sol_cqe, cid) / 32] &
869                         SOL_CID_MASK) >> 6)];
870         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
871                         dw[offsetof(struct amap_sol_cqe, wrb_index) /
872                         32] & SOL_WRB_INDEX_MASK) >> 16)];
873         task = pwrb_handle->pio_handle;
874         io_task = task->dd_data;
875         spin_lock(&phba->mgmt_sgl_lock);
876         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
877         spin_unlock(&phba->mgmt_sgl_lock);
878         spin_lock_bh(&session->lock);
879         free_wrb_handle(phba, pwrb_context, pwrb_handle);
880         spin_unlock_bh(&session->lock);
881 }
882
883 static void
884 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
885                        struct iscsi_task *task, struct sol_cqe *psol)
886 {
887         struct iscsi_nopin *hdr;
888         struct iscsi_conn *conn = beiscsi_conn->conn;
889         struct beiscsi_io_task *io_task = task->dd_data;
890
891         hdr = (struct iscsi_nopin *)task->hdr;
892         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
893                         & SOL_FLAGS_MASK) >> 24) | 0x80;
894         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
895                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
896         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
897                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
898                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
899                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
900         hdr->opcode = ISCSI_OP_NOOP_IN;
901         hdr->itt = io_task->libiscsi_itt;
902         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
903 }
904
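/**
 * hwi_complete_cmd - Dispatch a solicited completion by WRB type
 * @beiscsi_conn: Connection the completion belongs to
 * @phba: The hba that raised it
 * @psol: Solicited completion entry
 *
 * Looks up the WRB handle from the CID and wrb_index in the CQE and
 * routes the completion to the I/O, logout, TMF or NOP handler.
 */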
905 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
906                              struct beiscsi_hba *phba, struct sol_cqe *psol)
907 {
908         struct hwi_wrb_context *pwrb_context;
909         struct wrb_handle *pwrb_handle;
910         struct iscsi_wrb *pwrb = NULL;
911         struct hwi_controller *phwi_ctrlr;
912         struct iscsi_task *task;
913         unsigned int type;
914         struct iscsi_conn *conn = beiscsi_conn->conn;
915         struct iscsi_session *session = conn->session;
916
917         phwi_ctrlr = phba->phwi_ctrlr;
918         pwrb_context = &phwi_ctrlr->
919                         wrb_context[((psol->dw[offsetof
920                         (struct amap_sol_cqe, cid) / 32]
921                         & SOL_CID_MASK) >> 6)];
922         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
923                         dw[offsetof(struct amap_sol_cqe, wrb_index) /
924                         32] & SOL_WRB_INDEX_MASK) >> 16)];
925         task = pwrb_handle->pio_handle;
926         pwrb = pwrb_handle->pwrb;
927         type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
928                          WRB_TYPE_MASK) >> 28;
929
930         spin_lock_bh(&session->lock);
931         switch (type) {
932         case HWH_TYPE_IO:
933         case HWH_TYPE_IO_RD:
934                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
935                     ISCSI_OP_NOOP_OUT) {
936                         be_complete_nopin_resp(beiscsi_conn, task, psol);
937                 } else
938                         be_complete_io(beiscsi_conn, task, psol);
939                 break;
940
941         case HWH_TYPE_LOGOUT:
942                 be_complete_logout(beiscsi_conn, task, psol);
943                 break;
944
945         case HWH_TYPE_LOGIN:
946                 SE_DEBUG(DBG_LVL_1,
947                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
948                          " - Solicited path\n");
949                 break;
950
951         case HWH_TYPE_TMF:
952                 be_complete_tmf(beiscsi_conn, task, psol);
953                 break;
954
955         case HWH_TYPE_NOP:
956                 be_complete_nopin_resp(beiscsi_conn, task, psol);
957                 break;
958
959         default:
960                 shost_printk(KERN_WARNING, phba->shost,
961                         "In hwi_complete_cmd, unknown type = %d "
962                         "wrb_index 0x%x CID 0x%x\n", type,
963                         ((psol->dw[offsetof(struct amap_iscsi_wrb,
964                         type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
965                         ((psol->dw[offsetof(struct amap_sol_cqe,
966                         cid) / 32] & SOL_CID_MASK) >> 6));
967                 break;
968         }
969         spin_unlock_bh(&session->lock);
970 }
971
972 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
973                                           *pasync_ctx, unsigned int is_header,
974                                           unsigned int host_write_ptr)
975 {
976         if (is_header)
977                 return &pasync_ctx->async_entry[host_write_ptr].
978                     header_busy_list;
979         else
980                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
981 }
982
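/**
 * hwi_get_async_handle - Resolve the handle for a default PDU buffer
 * @phba: The hba that received the buffer
 * @beiscsi_conn: Connection the PDU belongs to
 * @pasync_ctx: Async PDU context holding the busy lists
 * @pdpdu_cqe: Default PDU completion entry
 * @pcq_index: Returns the CQ index reported in the CQE
 *
 * Matches the buffer address in the CQE against the header or data
 * busy list. Returns NULL for an unexpected CQE code.
 */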
983 static struct async_pdu_handle *
984 hwi_get_async_handle(struct beiscsi_hba *phba,
985                      struct beiscsi_conn *beiscsi_conn,
986                      struct hwi_async_pdu_context *pasync_ctx,
987                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
988 {
989         struct be_bus_address phys_addr;
990         struct list_head *pbusy_list;
991         struct async_pdu_handle *pasync_handle = NULL;
992         int buffer_len = 0;
993         unsigned char buffer_index = -1;
994         unsigned char is_header = 0;
995
996         phys_addr.u.a32.address_lo =
997             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
998             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
999                                                 & PDUCQE_DPL_MASK) >> 16);
1000         phys_addr.u.a32.address_hi =
1001             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1002
1003         phys_addr.u.a64.address =
1004                         *((unsigned long long *)(&phys_addr.u.a64.address));
1005
1006         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1007                         & PDUCQE_CODE_MASK) {
1008         case UNSOL_HDR_NOTIFY:
1009                 is_header = 1;
1010
1011                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1012                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1013                         index) / 32] & PDUCQE_INDEX_MASK));
1014
1015                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1016                                 pasync_ctx->async_header.pa_base.u.a64.address);
1017
1018                 buffer_index = buffer_len /
1019                                 pasync_ctx->async_header.buffer_size;
1020
1021                 break;
1022         case UNSOL_DATA_NOTIFY:
1023                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1024                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1025                                         index) / 32] & PDUCQE_INDEX_MASK));
1026                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1027                                         pasync_ctx->async_data.pa_base.u.
1028                                         a64.address);
1029                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1030                 break;
1031         default:
1032                 pbusy_list = NULL;
1033                 shost_printk(KERN_WARNING, phba->shost,
1034                         "Unexpected code=%d \n",
1035                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1036                                         code) / 32] & PDUCQE_CODE_MASK);
1037                 return NULL;
1038         }
1039
1040         WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
1041         WARN_ON(list_empty(pbusy_list));
1042         list_for_each_entry(pasync_handle, pbusy_list, link) {
1043                 WARN_ON(pasync_handle->consumed);
1044                 if (pasync_handle->index == buffer_index)
1045                         break;
1046         }
1047
1048         WARN_ON(!pasync_handle);
1049
1050         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid;
1051         pasync_handle->is_header = is_header;
1052         pasync_handle->buffer_len = ((pdpdu_cqe->
1053                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1054                         & PDUCQE_DPL_MASK) >> 16);
1055
1056         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1057                         index) / 32] & PDUCQE_INDEX_MASK);
1058         return pasync_handle;
1059 }
1060
1061 static unsigned int
1062 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1063                            unsigned int is_header, unsigned int cq_index)
1064 {
1065         struct list_head *pbusy_list;
1066         struct async_pdu_handle *pasync_handle;
1067         unsigned int num_entries, writables = 0;
1068         unsigned int *pep_read_ptr, *pwritables;
1069
1070
1071         if (is_header) {
1072                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1073                 pwritables = &pasync_ctx->async_header.writables;
1074                 num_entries = pasync_ctx->async_header.num_entries;
1075         } else {
1076                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1077                 pwritables = &pasync_ctx->async_data.writables;
1078                 num_entries = pasync_ctx->async_data.num_entries;
1079         }
1080
1081         while ((*pep_read_ptr) != cq_index) {
1082                 (*pep_read_ptr)++;
1083                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1084
1085                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1086                                                      *pep_read_ptr);
1087                 if (writables == 0)
1088                         WARN_ON(list_empty(pbusy_list));
1089
1090                 if (!list_empty(pbusy_list)) {
1091                         pasync_handle = list_entry(pbusy_list->next,
1092                                                    struct async_pdu_handle,
1093                                                    link);
1094                         WARN_ON(!pasync_handle);
1095                         pasync_handle->consumed = 1;
1096                 }
1097
1098                 writables++;
1099         }
1100
1101         if (!writables) {
1102                 SE_DEBUG(DBG_LVL_1,
1103                          "Duplicate notification received - index 0x%x!!\n",
1104                          cq_index);
1105                 WARN_ON(1);
1106         }
1107
1108         *pwritables = *pwritables + writables;
1109         return 0;
1110 }
1111
1112 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1113                                        unsigned int cri)
1114 {
1115         struct hwi_controller *phwi_ctrlr;
1116         struct hwi_async_pdu_context *pasync_ctx;
1117         struct async_pdu_handle *pasync_handle, *tmp_handle;
1118         struct list_head *plist;
1119         unsigned int i = 0;
1120
1121         phwi_ctrlr = phba->phwi_ctrlr;
1122         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1123
1124         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1125
1126         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1127                 list_del(&pasync_handle->link);
1128
1129                 if (i == 0) {
1130                         list_add_tail(&pasync_handle->link,
1131                                       &pasync_ctx->async_header.free_list);
1132                         pasync_ctx->async_header.free_entries++;
1133                         i++;
1134                 } else {
1135                         list_add_tail(&pasync_handle->link,
1136                                       &pasync_ctx->async_data.free_list);
1137                         pasync_ctx->async_data.free_entries++;
1138                         i++;
1139                 }
1140         }
1141
1142         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1143         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1144         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1145         return 0;
1146 }
1147
1148 static struct phys_addr *
1149 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1150                      unsigned int is_header, unsigned int host_write_ptr)
1151 {
1152         struct phys_addr *pasync_sge = NULL;
1153
1154         if (is_header)
1155                 pasync_sge = pasync_ctx->async_header.ring_base;
1156         else
1157                 pasync_sge = pasync_ctx->async_data.ring_base;
1158
1159         return pasync_sge + host_write_ptr;
1160 }
1161
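/**
 * hwi_post_async_buffers - Replenish a default PDU ring
 * @phba: The hba that owns the ring
 * @is_header: Nonzero for the header ring, zero for the data ring
 *
 * Moves free handles back onto the ring in multiples of eight and
 * rings the RXULP doorbell with the number posted.
 */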
1162 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1163                                    unsigned int is_header)
1164 {
1165         struct hwi_controller *phwi_ctrlr;
1166         struct hwi_async_pdu_context *pasync_ctx;
1167         struct async_pdu_handle *pasync_handle;
1168         struct list_head *pfree_link, *pbusy_list;
1169         struct phys_addr *pasync_sge;
1170         unsigned int ring_id, num_entries;
1171         unsigned int host_write_num;
1172         unsigned int writables;
1173         unsigned int i = 0;
1174         u32 doorbell = 0;
1175
1176         phwi_ctrlr = phba->phwi_ctrlr;
1177         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1178
1179         if (is_header) {
1180                 num_entries = pasync_ctx->async_header.num_entries;
1181                 writables = min(pasync_ctx->async_header.writables,
1182                                 pasync_ctx->async_header.free_entries);
1183                 pfree_link = pasync_ctx->async_header.free_list.next;
1184                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1185                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1186         } else {
1187                 num_entries = pasync_ctx->async_data.num_entries;
1188                 writables = min(pasync_ctx->async_data.writables,
1189                                 pasync_ctx->async_data.free_entries);
1190                 pfree_link = pasync_ctx->async_data.free_list.next;
1191                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1192                 ring_id = phwi_ctrlr->default_pdu_data.id;
1193         }
1194
1195         writables = (writables / 8) * 8;
1196         if (writables) {
1197                 for (i = 0; i < writables; i++) {
1198                         pbusy_list =
1199                             hwi_get_async_busy_list(pasync_ctx, is_header,
1200                                                     host_write_num);
1201                         pasync_handle =
1202                             list_entry(pfree_link, struct async_pdu_handle,
1203                                                                 link);
1204                         WARN_ON(!pasync_handle);
1205                         pasync_handle->consumed = 0;
1206
1207                         pfree_link = pfree_link->next;
1208
1209                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1210                                                 is_header, host_write_num);
1211
1212                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1213                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1214
1215                         list_move(&pasync_handle->link, pbusy_list);
1216
1217                         host_write_num++;
1218                         host_write_num = host_write_num % num_entries;
1219                 }
1220
1221                 if (is_header) {
1222                         pasync_ctx->async_header.host_write_ptr =
1223                                                         host_write_num;
1224                         pasync_ctx->async_header.free_entries -= writables;
1225                         pasync_ctx->async_header.writables -= writables;
1226                         pasync_ctx->async_header.busy_entries += writables;
1227                 } else {
1228                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1229                         pasync_ctx->async_data.free_entries -= writables;
1230                         pasync_ctx->async_data.writables -= writables;
1231                         pasync_ctx->async_data.busy_entries += writables;
1232                 }
1233
1234                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1235                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1236                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1237                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1238                                         << DB_DEF_PDU_CQPROC_SHIFT;
1239
1240                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1241         }
1242 }
1243
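/**
 * hwi_flush_default_pdu_buffer - Drop a default PDU data buffer
 * @phba: The hba that received it
 * @beiscsi_conn: Connection the buffer belongs to
 * @pdpdu_cqe: Default PDU completion entry
 *
 * Returns the handles queued for this CRI to the free lists and
 * reposts buffers to the ring.
 */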
1244 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1245                                          struct beiscsi_conn *beiscsi_conn,
1246                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1247 {
1248         struct hwi_controller *phwi_ctrlr;
1249         struct hwi_async_pdu_context *pasync_ctx;
1250         struct async_pdu_handle *pasync_handle = NULL;
1251         unsigned int cq_index = -1;
1252
1253         phwi_ctrlr = phba->phwi_ctrlr;
1254         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1255
1256         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1257                                              pdpdu_cqe, &cq_index);
1258         BUG_ON(pasync_handle->is_header != 0);
1259         if (pasync_handle->consumed == 0)
1260                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1261                                            cq_index);
1262
1263         hwi_free_async_msg(phba, pasync_handle->cri);
1264         hwi_post_async_buffers(phba, pasync_handle->is_header);
1265 }
1266
1267 static unsigned int
1268 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1269                   struct beiscsi_hba *phba,
1270                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1271 {
1272         struct list_head *plist;
1273         struct async_pdu_handle *pasync_handle;
1274         void *phdr = NULL;
1275         unsigned int hdr_len = 0, buf_len = 0;
1276         unsigned int status, index = 0, offset = 0;
1277         void *pfirst_buffer = NULL;
1278         unsigned int num_buf = 0;
1279
1280         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1281
1282         list_for_each_entry(pasync_handle, plist, link) {
1283                 if (index == 0) {
1284                         phdr = pasync_handle->pbuffer;
1285                         hdr_len = pasync_handle->buffer_len;
1286                 } else {
1287                         buf_len = pasync_handle->buffer_len;
1288                         if (!num_buf) {
1289                                 pfirst_buffer = pasync_handle->pbuffer;
1290                                 num_buf++;
1291                         }
1292                         memcpy(pfirst_buffer + offset,
1293                                pasync_handle->pbuffer, buf_len);
1294                         offset += buf_len;
1295                 }
1296                 index++;
1297         }
1298
1299         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1300                                            beiscsi_conn->beiscsi_conn_cid,
1301                                            phdr, hdr_len, pfirst_buffer,
1302                                            buf_len);
1303
1304         if (status == 0)
1305                 hwi_free_async_msg(phba, cri);
1306         return 0;
1307 }
1308
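     /*
      * hwi_gather_async_pdu - queue header/data pieces of an unsolicited PDU.
      * A header completion opens the wait-queue entry for the CRI and records
      * how many data bytes are still expected; data completions are appended
      * to the entry and, once all expected bytes have arrived, the PDU is
      * forwarded with hwi_fwd_async_msg().
      */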
1309 static unsigned int
1310 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1311                      struct beiscsi_hba *phba,
1312                      struct async_pdu_handle *pasync_handle)
1313 {
1314         struct hwi_async_pdu_context *pasync_ctx;
1315         struct hwi_controller *phwi_ctrlr;
1316         unsigned int bytes_needed = 0, status = 0;
1317         unsigned short cri = pasync_handle->cri;
1318         struct pdu_base *ppdu;
1319
1320         phwi_ctrlr = phba->phwi_ctrlr;
1321         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1322
1323         list_del(&pasync_handle->link);
1324         if (pasync_handle->is_header) {
1325                 pasync_ctx->async_header.busy_entries--;
1326                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1327                         hwi_free_async_msg(phba, cri);
1328                         BUG();
1329                 }
1330
1331                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1332                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1333                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1334                                 (unsigned short)pasync_handle->buffer_len;
1335                 list_add_tail(&pasync_handle->link,
1336                               &pasync_ctx->async_entry[cri].wait_queue.list);
1337
1338                 ppdu = pasync_handle->pbuffer;
1339                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1340                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1341                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1342                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1343                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1344
1345                 if (status == 0) {
1346                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1347                             bytes_needed;
1348
1349                         if (bytes_needed == 0)
1350                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1351                                                            pasync_ctx, cri);
1352                 }
1353         } else {
1354                 pasync_ctx->async_data.busy_entries--;
1355                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1356                         list_add_tail(&pasync_handle->link,
1357                                       &pasync_ctx->async_entry[cri].wait_queue.
1358                                       list);
1359                         pasync_ctx->async_entry[cri].wait_queue.
1360                                 bytes_received +=
1361                                 (unsigned short)pasync_handle->buffer_len;
1362
1363                         if (pasync_ctx->async_entry[cri].wait_queue.
1364                             bytes_received >=
1365                             pasync_ctx->async_entry[cri].wait_queue.
1366                             bytes_needed)
1367                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1368                                                            pasync_ctx, cri);
1369                 }
1370         }
1371         return status;
1372 }
1373
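     /*
      * hwi_process_default_pdu_ring - handle an UNSOL_HDR/UNSOL_DATA CQE.
      * Resolves the async handle for the CQE, updates the writable counters
      * if the entry was not consumed, gathers the PDU pieces and reposts
      * free buffers to the default PDU ring.
      */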
1374 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1375                                          struct beiscsi_hba *phba,
1376                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1377 {
1378         struct hwi_controller *phwi_ctrlr;
1379         struct hwi_async_pdu_context *pasync_ctx;
1380         struct async_pdu_handle *pasync_handle = NULL;
1381         unsigned int cq_index = -1;
1382
1383         phwi_ctrlr = phba->phwi_ctrlr;
1384         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1385         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1386                                              pdpdu_cqe, &cq_index);
1387
1388         if (pasync_handle->consumed == 0)
1389                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1390                                            cq_index);
1391         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1392         hwi_post_async_buffers(phba, pasync_handle->is_header);
1393 }
1394
1395
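     /*
      * beiscsi_process_cq - drain the completion queue bound to one EQ.
      * Processes valid CQEs until the queue is empty, dispatching solicited
      * command completions, driver messages, unsolicited PDU notifications
      * and error codes. The CQ doorbell is rung every 32 entries and once
      * more when the loop finishes; the number of processed entries is
      * returned to the caller.
      */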
1396 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1397 {
1398         struct be_queue_info *cq;
1399         struct sol_cqe *sol;
1400         struct dmsg_cqe *dmsg;
1401         unsigned int num_processed = 0;
1402         unsigned int tot_nump = 0;
1403         struct beiscsi_conn *beiscsi_conn;
1404         struct beiscsi_hba *phba;
1405
1406         cq = pbe_eq->cq;
1407         sol = queue_tail_node(cq);
1408         phba = pbe_eq->phba;
1409
1410         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1411                CQE_VALID_MASK) {
1412                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1413
1414                 beiscsi_conn = phba->conn_table[(u32) (sol->
1415                                  dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1416                                  SOL_CID_MASK) >> 6];
1417
1418                 if (!beiscsi_conn || !beiscsi_conn->ep) {
1419                         shost_printk(KERN_WARNING, phba->shost,
1420                                      "Connection table empty for cid = %d\n",
1421                                      (u32)(sol->dw[offsetof(struct amap_sol_cqe,
1422                                      cid) / 32] & SOL_CID_MASK) >> 6);
1423                         return 0;
1424                 }
1425
1426                 if (num_processed >= 32) {
1427                         hwi_ring_cq_db(phba, cq->id,
1428                                         num_processed, 0, 0);
1429                         tot_nump += num_processed;
1430                         num_processed = 0;
1431                 }
1432
1433                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1434                         32] & CQE_CODE_MASK) {
1435                 case SOL_CMD_COMPLETE:
1436                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1437                         break;
1438                 case DRIVERMSG_NOTIFY:
1439                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
1440                         dmsg = (struct dmsg_cqe *)sol;
1441                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1442                         break;
1443                 case UNSOL_HDR_NOTIFY:
1444                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1445                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1446                                              (struct i_t_dpdu_cqe *)sol);
1447                         break;
1448                 case UNSOL_DATA_NOTIFY:
1449                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1450                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1451                                              (struct i_t_dpdu_cqe *)sol);
1452                         break;
1453                 case CXN_INVALIDATE_INDEX_NOTIFY:
1454                 case CMD_INVALIDATED_NOTIFY:
1455                 case CXN_INVALIDATE_NOTIFY:
1456                         SE_DEBUG(DBG_LVL_1,
1457                                  "Ignoring CQ Error notification for cmd/cxn "
1458                                  "invalidate\n");
1459                         break;
1460                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1461                 case CMD_KILLED_INVALID_STATSN_RCVD:
1462                 case CMD_KILLED_INVALID_R2T_RCVD:
1463                 case CMD_CXN_KILLED_LUN_INVALID:
1464                 case CMD_CXN_KILLED_ICD_INVALID:
1465                 case CMD_CXN_KILLED_ITT_INVALID:
1466                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1467                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1468                         SE_DEBUG(DBG_LVL_1,
1469                                  "CQ Error notification for cmd.. "
1470                                  "code %d cid 0x%x\n",
1471                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1472                                  32] & CQE_CODE_MASK,
1473                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1474                                  32] & SOL_CID_MASK));
1475                         break;
1476                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1477                         SE_DEBUG(DBG_LVL_1,
1478                                  "Digest error on def pdu ring, dropping..\n");
1479                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1480                                              (struct i_t_dpdu_cqe *) sol);
1481                         break;
1482                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1483                 case CXN_KILLED_BURST_LEN_MISMATCH:
1484                 case CXN_KILLED_AHS_RCVD:
1485                 case CXN_KILLED_HDR_DIGEST_ERR:
1486                 case CXN_KILLED_UNKNOWN_HDR:
1487                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1488                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1489                 case CXN_KILLED_TIMED_OUT:
1490                 case CXN_KILLED_FIN_RCVD:
1491                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1492                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1493                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1494                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1495                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1496                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1497                                  "0x%x...\n",
1498                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1499                                  32] & CQE_CODE_MASK,
1500                                  sol->dw[offsetof(struct amap_sol_cqe, cid) /
1501                                  32] & CQE_CID_MASK);
1502                         iscsi_conn_failure(beiscsi_conn->conn,
1503                                            ISCSI_ERR_CONN_FAILED);
1504                         break;
1505                 case CXN_KILLED_RST_SENT:
1506                 case CXN_KILLED_RST_RCVD:
1507                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1508                                  "received/sent on CID 0x%x...\n",
1509                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1510                                  32] & CQE_CODE_MASK,
1511                                  sol->dw[offsetof(struct amap_sol_cqe, cid) /
1512                                  32] & CQE_CID_MASK);
1513                         iscsi_conn_failure(beiscsi_conn->conn,
1514                                            ISCSI_ERR_CONN_FAILED);
1515                         break;
1516                 default:
1517                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1518                                  "received on CID 0x%x...\n",
1519                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1520                                  32] & CQE_CODE_MASK,
1521                                  sol->dw[offsetof(struct amap_sol_cqe, cid) /
1522                                  32] & CQE_CID_MASK);
1523                         break;
1524                 }
1525
1526                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1527                 queue_tail_inc(cq);
1528                 sol = queue_tail_node(cq);
1529                 num_processed++;
1530         }
1531
1532         if (num_processed > 0) {
1533                 tot_nump += num_processed;
1534                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1535         }
1536         return tot_nump;
1537 }
1538
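     /*
      * beiscsi_process_all_cqs - work item servicing deferred CQ events.
      * Picks the extra (MCC) event queue object when MSI-X is enabled, or
      * EQ 0 otherwise, clears the todo flags under isr_lock and processes
      * the iSCSI CQ if work was pending.
      */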
1539 static void beiscsi_process_all_cqs(struct work_struct *work)
1540 {
1541         unsigned long flags;
1542         struct hwi_controller *phwi_ctrlr;
1543         struct hwi_context_memory *phwi_context;
1544         struct be_eq_obj *pbe_eq;
1545         struct beiscsi_hba *phba =
1546             container_of(work, struct beiscsi_hba, work_cqs);
1547
1548         phwi_ctrlr = phba->phwi_ctrlr;
1549         phwi_context = phwi_ctrlr->phwi_ctxt;
1550         if (phba->msix_enabled)
1551                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1552         else
1553                 pbe_eq = &phwi_context->be_eq[0];
1554
1555         if (phba->todo_mcc_cq) {
1556                 spin_lock_irqsave(&phba->isr_lock, flags);
1557                 phba->todo_mcc_cq = 0;
1558                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1559         }
1560
1561         if (phba->todo_cq) {
1562                 spin_lock_irqsave(&phba->isr_lock, flags);
1563                 phba->todo_cq = 0;
1564                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1565                 beiscsi_process_cq(pbe_eq);
1566         }
1567 }
1568
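     /*
      * be_iopoll - blk_iopoll handler for the iSCSI event queues.
      * Processes the CQ associated with this EQ; if fewer entries than the
      * budget were handled, the poll is completed and the EQ doorbell is
      * rearmed.
      */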
1569 static int be_iopoll(struct blk_iopoll *iop, int budget)
1570 {
1571         unsigned int ret;
1572         struct beiscsi_hba *phba;
1573         struct be_eq_obj *pbe_eq;
1574
1575         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1576         ret = beiscsi_process_cq(pbe_eq);
1577         if (ret < budget) {
1578                 phba = pbe_eq->phba;
1579                 blk_iopoll_complete(iop);
1580                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1581                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1582         }
1583         return ret;
1584 }
1585
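     /*
      * hwi_write_sgl - build the WRB inline SGEs and the SGL page for an I/O.
      * The first two scatterlist elements are written directly into the WRB
      * (sge0/sge1); the complete scatterlist is then written into the iSCSI
      * SGL fragment referenced by the task's sgl handle, with the final SGE
      * marked as last.
      */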
1586 static void
1587 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1588               unsigned int num_sg, struct beiscsi_io_task *io_task)
1589 {
1590         struct iscsi_sge *psgl;
1591         unsigned short sg_len, index;
1592         unsigned int sge_len = 0;
1593         unsigned long long addr;
1594         struct scatterlist *l_sg;
1595         unsigned int offset;
1596
1597         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1598                                       io_task->bhs_pa.u.a32.address_lo);
1599         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1600                                       io_task->bhs_pa.u.a32.address_hi);
1601
1602         l_sg = sg;
1603         for (index = 0; (index < num_sg) && (index < 2); index++, sg = sg_next(sg)) {
1604                 if (index == 0) {
1605                         sg_len = sg_dma_len(sg);
1606                         addr = (u64) sg_dma_address(sg);
1607                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1608                                                         (addr & 0xFFFFFFFF));
1609                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1610                                                         (addr >> 32));
1611                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1612                                                         sg_len);
1613                         sge_len = sg_len;
1614                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1615                                                         1);
1616                 } else {
1617                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1618                                                         0);
1619                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1620                                                         pwrb, sge_len);
1621                         sg_len = sg_dma_len(sg);
1622                         addr = (u64) sg_dma_address(sg);
1623                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1624                                                         (addr & 0xFFFFFFFF));
1625                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1626                                                         (addr >> 32));
1627                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1628                                                         sg_len);
1629                 }
1630         }
1631         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1632         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1633
1634         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1635
1636         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1637                         io_task->bhs_pa.u.a32.address_hi);
1638         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1639                         io_task->bhs_pa.u.a32.address_lo);
1640
1641         if (num_sg == 2)
1642                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
1643         sg = l_sg;
1644         psgl++;
1645         psgl++;
1646         offset = 0;
1647         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1648                 sg_len = sg_dma_len(sg);
1649                 addr = (u64) sg_dma_address(sg);
1650                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1651                                                 (addr & 0xFFFFFFFF));
1652                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1653                                                 (addr >> 32));
1654                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1655                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1656                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1657                 offset += sg_len;
1658         }
1659         psgl--;
1660         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1661 }
1662
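     /*
      * hwi_write_buffer - set up the WRB and SGL for a non-I/O (mgmt) task.
      * Maps the task's immediate data, if any, with pci_map_single() and
      * programs the BHS address plus a small SGL describing header and data.
      */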
1663 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1664 {
1665         struct iscsi_sge *psgl;
1666         unsigned long long addr;
1667         struct beiscsi_io_task *io_task = task->dd_data;
1668         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1669         struct beiscsi_hba *phba = beiscsi_conn->phba;
1670
1671         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1672         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1673                                 io_task->bhs_pa.u.a32.address_lo);
1674         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1675                                 io_task->bhs_pa.u.a32.address_hi);
1676
1677         if (task->data) {
1678                 if (task->data_count) {
1679                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1680                         addr = (u64) pci_map_single(phba->pcidev,
1681                                                     task->data,
1682                                                     task->data_count, 1);
1683                 } else {
1684                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1685                         addr = 0;
1686                 }
1687                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1688                                                 (addr & 0xFFFFFFFF));
1689                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1690                                                 (addr >> 32));
1691                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1692                                                 task->data_count);
1693
1694                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1695         } else {
1696                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1697                 addr = 0;
1698         }
1699
1700         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1701
1702         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1703
1704         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1705                       io_task->bhs_pa.u.a32.address_hi);
1706         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1707                       io_task->bhs_pa.u.a32.address_lo);
1708         if (task->data) {
1709                 psgl++;
1710                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1711                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1712                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1713                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1714                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1715                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1716
1717                 psgl++;
1718                 if (task->data) {
1719                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1720                                                 (addr & 0xFFFFFFFF));
1721                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1722                                                 (addr >> 32));
1723                 }
1724                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1725         }
1726         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1727 }
1728
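     /*
      * beiscsi_find_mem_req - compute the size of every memory region the
      * driver needs (WRBs, WRB handles, SGL handles, SGEs, async PDU
      * buffers/rings/handles and the HWI context) and record the totals in
      * phba->mem_req[] for beiscsi_alloc_mem() to allocate.
      */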
1729 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1730 {
1731         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1732         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1733         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1734
1735         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1736                                       sizeof(struct sol_cqe));
1737         num_async_pdu_buf_pages =
1738                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1739                                        phba->params.defpdu_hdr_sz);
1740         num_async_pdu_buf_sgl_pages =
1741                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1742                                        sizeof(struct phys_addr));
1743         num_async_pdu_data_pages =
1744                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1745                                        phba->params.defpdu_data_sz);
1746         num_async_pdu_data_sgl_pages =
1747                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1748                                        sizeof(struct phys_addr));
1749
1750         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1751
1752         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1753                                                  BE_ISCSI_PDU_HEADER_SIZE;
1754         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1755                                             sizeof(struct hwi_context_memory);
1756
1757
1758         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1759             * (phba->params.wrbs_per_cxn)
1760             * phba->params.cxns_per_ctrl;
1761         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1762                                  (phba->params.wrbs_per_cxn);
1763         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1764                                 phba->params.cxns_per_ctrl);
1765
1766         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1767                 phba->params.icds_per_ctrl;
1768         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1769                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1770
1771         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1772                 num_async_pdu_buf_pages * PAGE_SIZE;
1773         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1774                 num_async_pdu_data_pages * PAGE_SIZE;
1775         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1776                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1777         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1778                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1779         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1780                 phba->params.asyncpdus_per_ctrl *
1781                 sizeof(struct async_pdu_handle);
1782         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1783                 phba->params.asyncpdus_per_ctrl *
1784                 sizeof(struct async_pdu_handle);
1785         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1786                 sizeof(struct hwi_async_pdu_context) +
1787                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1788 }
1789
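     /*
      * beiscsi_alloc_mem - allocate every region listed in phba->mem_req[].
      * Each region is carved out of DMA-coherent memory in chunks no larger
      * than be_max_phys_size KB, falling back to smaller power-of-two sizes
      * when an allocation fails; the resulting fragments are recorded in
      * phba->init_mem. Everything allocated so far is released on error.
      */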
1790 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1791 {
1792         struct be_mem_descriptor *mem_descr;
1793         dma_addr_t bus_add;
1794         struct mem_array *mem_arr, *mem_arr_orig;
1795         unsigned int i, j, alloc_size, curr_alloc_size;
1796
1797         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1798         if (!phba->phwi_ctrlr)
1799                 return -ENOMEM;
1800
1801         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1802                                  GFP_KERNEL);
1803         if (!phba->init_mem) {
1804                 kfree(phba->phwi_ctrlr);
1805                 return -ENOMEM;
1806         }
1807
1808         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1809                                GFP_KERNEL);
1810         if (!mem_arr_orig) {
1811                 kfree(phba->init_mem);
1812                 kfree(phba->phwi_ctrlr);
1813                 return -ENOMEM;
1814         }
1815
1816         mem_descr = phba->init_mem;
1817         for (i = 0; i < SE_MEM_MAX; i++) {
1818                 j = 0;
1819                 mem_arr = mem_arr_orig;
1820                 alloc_size = phba->mem_req[i];
1821                 memset(mem_arr, 0, sizeof(struct mem_array) *
1822                        BEISCSI_MAX_FRAGS_INIT);
1823                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
1824                 do {
1825                         mem_arr->virtual_address = pci_alloc_consistent(
1826                                                         phba->pcidev,
1827                                                         curr_alloc_size,
1828                                                         &bus_add);
1829                         if (!mem_arr->virtual_address) {
1830                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
1831                                         goto free_mem;
1832                                 if (curr_alloc_size -
1833                                         rounddown_pow_of_two(curr_alloc_size))
1834                                         curr_alloc_size = rounddown_pow_of_two
1835                                                              (curr_alloc_size);
1836                                 else
1837                                         curr_alloc_size = curr_alloc_size / 2;
1838                         } else {
1839                                 mem_arr->bus_address.u.
1840                                     a64.address = (__u64) bus_add;
1841                                 mem_arr->size = curr_alloc_size;
1842                                 alloc_size -= curr_alloc_size;
1843                                 curr_alloc_size = min(be_max_phys_size *
1844                                                       1024, alloc_size);
1845                                 j++;
1846                                 mem_arr++;
1847                         }
1848                 } while (alloc_size);
1849                 mem_descr->num_elements = j;
1850                 mem_descr->size_in_bytes = phba->mem_req[i];
1851                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
1852                                                GFP_KERNEL);
1853                 if (!mem_descr->mem_array)
1854                         goto free_mem;
1855
1856                 memcpy(mem_descr->mem_array, mem_arr_orig,
1857                        sizeof(struct mem_array) * j);
1858                 mem_descr++;
1859         }
1860         kfree(mem_arr_orig);
1861         return 0;
1862 free_mem:
1863         mem_descr->num_elements = j;
1864         while ((i) || (j)) {
1865                 for (j = mem_descr->num_elements; j > 0; j--) {
1866                         pci_free_consistent(phba->pcidev,
1867                                             mem_descr->mem_array[j - 1].size,
1868                                             mem_descr->mem_array[j - 1].
1869                                             virtual_address,
1870                                             mem_descr->mem_array[j - 1].
1871                                             bus_address.u.a64.address);
1872                 }
1873                 if (i) {
1874                         i--;
1875                         kfree(mem_descr->mem_array);
1876                         mem_descr--;
1877                 }
1878         }
1879         kfree(mem_arr_orig);
1880         kfree(phba->init_mem);
1881         kfree(phba->phwi_ctrlr);
1882         return -ENOMEM;
1883 }
1884
1885 static int beiscsi_get_memory(struct beiscsi_hba *phba)
1886 {
1887         beiscsi_find_mem_req(phba);
1888         return beiscsi_alloc_mem(phba);
1889 }
1890
1891 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1892 {
1893         struct pdu_data_out *pdata_out;
1894         struct pdu_nop_out *pnop_out;
1895         struct be_mem_descriptor *mem_descr;
1896
1897         mem_descr = phba->init_mem;
1898         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1899         pdata_out =
1900             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1901         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1902
1903         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1904                       IIOC_SCSI_DATA);
1905
1906         pnop_out =
1907             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1908                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1909
1910         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1911         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1912         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1913         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
1914 }
1915
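     /*
      * beiscsi_init_wrb_handle - distribute the WRB handle and WRB arrays
      * across the per-connection contexts, linking every wrb_handle to its
      * iscsi_wrb and initialising the alloc/free indices used by the WRB
      * allocator.
      */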
1916 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1917 {
1918         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
1919         struct wrb_handle *pwrb_handle;
1920         struct hwi_controller *phwi_ctrlr;
1921         struct hwi_wrb_context *pwrb_context;
1922         struct iscsi_wrb *pwrb;
1923         unsigned int num_cxn_wrbh;
1924         unsigned int num_cxn_wrb, j, idx, index;
1925
1926         mem_descr_wrbh = phba->init_mem;
1927         mem_descr_wrbh += HWI_MEM_WRBH;
1928
1929         mem_descr_wrb = phba->init_mem;
1930         mem_descr_wrb += HWI_MEM_WRB;
1931
1932         idx = 0;
1933         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
1934         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
1935                         ((sizeof(struct wrb_handle)) *
1936                          phba->params.wrbs_per_cxn));
1937         phwi_ctrlr = phba->phwi_ctrlr;
1938
1939         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
1940                 pwrb_context = &phwi_ctrlr->wrb_context[index];
1941                 pwrb_context->pwrb_handle_base =
1942                                 kzalloc(sizeof(struct wrb_handle *) *
1943                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
1944                 pwrb_context->pwrb_handle_basestd =
1945                                 kzalloc(sizeof(struct wrb_handle *) *
1946                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
1947                 if (num_cxn_wrbh) {
1948                         pwrb_context->alloc_index = 0;
1949                         pwrb_context->wrb_handles_available = 0;
1950                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
1951                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
1952                                 pwrb_context->pwrb_handle_basestd[j] =
1953                                                                 pwrb_handle;
1954                                 pwrb_context->wrb_handles_available++;
1955                                 pwrb_handle->wrb_index = j;
1956                                 pwrb_handle++;
1957                         }
1958                         pwrb_context->free_index = 0;
1959                         num_cxn_wrbh--;
1960                 } else {
1961                         idx++;
1962                         pwrb_handle =
1963                             mem_descr_wrbh->mem_array[idx].virtual_address;
1964                         num_cxn_wrbh =
1965                             ((mem_descr_wrbh->mem_array[idx].size) /
1966                              ((sizeof(struct wrb_handle)) *
1967                               phba->params.wrbs_per_cxn));
1968                         pwrb_context->alloc_index = 0;
1969                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
1970                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
1971                                 pwrb_context->pwrb_handle_basestd[j] =
1972                                     pwrb_handle;
1973                                 pwrb_context->wrb_handles_available++;
1974                                 pwrb_handle->wrb_index = j;
1975                                 pwrb_handle++;
1976                         }
1977                         pwrb_context->free_index = 0;
1978                         num_cxn_wrbh--;
1979                 }
1980         }
1981         idx = 0;
1982         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
1983         num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
1984                        ((sizeof(struct iscsi_wrb)) *
1985                         phba->params.wrbs_per_cxn));
1986
1987         for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
1988                 pwrb_context = &phwi_ctrlr->wrb_context[index];
1989                 if (num_cxn_wrb) {
1990                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
1991                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
1992                                 pwrb_handle->pwrb = pwrb;
1993                                 pwrb++;
1994                         }
1995                         num_cxn_wrb--;
1996                 } else {
1997                         idx++;
1998                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
1999                         num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
2000                                         ((sizeof(struct iscsi_wrb)) *
2001                                          phba->params.wrbs_per_cxn));
2002                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2003                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2004                                 pwrb_handle->pwrb = pwrb;
2005                                 pwrb++;
2006                         }
2007                         num_cxn_wrb--;
2008                 }
2009         }
2010 }
2011
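     /*
      * hwi_init_async_pdu_ctx - initialise the default PDU (async) context.
      * Wires the previously allocated header and data buffer pools, rings
      * and handle arrays into the hwi_async_pdu_context, builds the free
      * lists and resets the ring read/write pointers.
      */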
2012 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2013 {
2014         struct hwi_controller *phwi_ctrlr;
2015         struct hba_parameters *p = &phba->params;
2016         struct hwi_async_pdu_context *pasync_ctx;
2017         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2018         unsigned int index;
2019         struct be_mem_descriptor *mem_descr;
2020
2021         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2022         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2023
2024         phwi_ctrlr = phba->phwi_ctrlr;
2025         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2026                                 mem_descr->mem_array[0].virtual_address;
2027         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2028         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2029
2030         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2031         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2032         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2033         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2034
2035         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2036         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2037         if (mem_descr->mem_array[0].virtual_address) {
2038                 SE_DEBUG(DBG_LVL_8,
2039                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF "
2040                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2041         } else
2042                 shost_printk(KERN_WARNING, phba->shost,
2043                              "No Virtual address \n");
2044
2045         pasync_ctx->async_header.va_base =
2046                         mem_descr->mem_array[0].virtual_address;
2047
2048         pasync_ctx->async_header.pa_base.u.a64.address =
2049                         mem_descr->mem_array[0].bus_address.u.a64.address;
2050
2051         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2052         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2053         if (mem_descr->mem_array[0].virtual_address) {
2054                 SE_DEBUG(DBG_LVL_8,
2055                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING "
2056                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2057         } else
2058                 shost_printk(KERN_WARNING, phba->shost,
2059                             "No Virtual address \n");
2060         pasync_ctx->async_header.ring_base =
2061                         mem_descr->mem_array[0].virtual_address;
2062
2063         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2064         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2065         if (mem_descr->mem_array[0].virtual_address) {
2066                 SE_DEBUG(DBG_LVL_8,
2067                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE "
2068                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2069         } else
2070                 shost_printk(KERN_WARNING, phba->shost,
2071                             "No Virtual address \n");
2072
2073         pasync_ctx->async_header.handle_base =
2074                         mem_descr->mem_array[0].virtual_address;
2075         pasync_ctx->async_header.writables = 0;
2076         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2077
2078         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2079         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2080         if (mem_descr->mem_array[0].virtual_address) {
2081                 SE_DEBUG(DBG_LVL_8,
2082                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF "
2083                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2084         } else
2085                 shost_printk(KERN_WARNING, phba->shost,
2086                             "No Virtual address \n");
2087         pasync_ctx->async_data.va_base =
2088                         mem_descr->mem_array[0].virtual_address;
2089         pasync_ctx->async_data.pa_base.u.a64.address =
2090                         mem_descr->mem_array[0].bus_address.u.a64.address;
2091
2092         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2093         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2094         if (mem_descr->mem_array[0].virtual_address) {
2095                 SE_DEBUG(DBG_LVL_8,
2096                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING "
2097                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2098         } else
2099                 shost_printk(KERN_WARNING, phba->shost,
2100                              "No Virtual address \n");
2101
2102         pasync_ctx->async_data.ring_base =
2103                         mem_descr->mem_array[0].virtual_address;
2104
2105         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2106         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2107         if (!mem_descr->mem_array[0].virtual_address)
2108                 shost_printk(KERN_WARNING, phba->shost,
2109                             "No Virtual address \n");
2110
2111         pasync_ctx->async_data.handle_base =
2112                         mem_descr->mem_array[0].virtual_address;
2113         pasync_ctx->async_data.writables = 0;
2114         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2115
2116         pasync_header_h =
2117                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2118         pasync_data_h =
2119                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2120
2121         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2122                 pasync_header_h->cri = -1;
2123                 pasync_header_h->index = (char)index;
2124                 INIT_LIST_HEAD(&pasync_header_h->link);
2125                 pasync_header_h->pbuffer =
2126                         (void *)((unsigned long)
2127                         (pasync_ctx->async_header.va_base) +
2128                         (p->defpdu_hdr_sz * index));
2129
2130                 pasync_header_h->pa.u.a64.address =
2131                         pasync_ctx->async_header.pa_base.u.a64.address +
2132                         (p->defpdu_hdr_sz * index);
2133
2134                 list_add_tail(&pasync_header_h->link,
2135                                 &pasync_ctx->async_header.free_list);
2136                 pasync_header_h++;
2137                 pasync_ctx->async_header.free_entries++;
2138                 pasync_ctx->async_header.writables++;
2139
2140                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2141                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2142                                header_busy_list);
2143                 pasync_data_h->cri = -1;
2144                 pasync_data_h->index = (char)index;
2145                 INIT_LIST_HEAD(&pasync_data_h->link);
2146                 pasync_data_h->pbuffer =
2147                         (void *)((unsigned long)
2148                         (pasync_ctx->async_data.va_base) +
2149                         (p->defpdu_data_sz * index));
2150
2151                 pasync_data_h->pa.u.a64.address =
2152                     pasync_ctx->async_data.pa_base.u.a64.address +
2153                     (p->defpdu_data_sz * index);
2154
2155                 list_add_tail(&pasync_data_h->link,
2156                               &pasync_ctx->async_data.free_list);
2157                 pasync_data_h++;
2158                 pasync_ctx->async_data.free_entries++;
2159                 pasync_ctx->async_data.writables++;
2160
2161                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2162         }
2163
2164         pasync_ctx->async_header.host_write_ptr = 0;
2165         pasync_ctx->async_header.ep_read_ptr = -1;
2166         pasync_ctx->async_data.host_write_ptr = 0;
2167         pasync_ctx->async_data.ep_read_ptr = -1;
2168 }
2169
2170 static int
2171 be_sgl_create_contiguous(void *virtual_address,
2172                          u64 physical_address, u32 length,
2173                          struct be_dma_mem *sgl)
2174 {
2175         WARN_ON(!virtual_address);
2176         WARN_ON(!physical_address);
2177         WARN_ON(!length);
2178         WARN_ON(!sgl);
2179
2180         sgl->va = virtual_address;
2181         sgl->dma = physical_address;
2182         sgl->size = length;
2183
2184         return 0;
2185 }
2186
2187 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2188 {
2189         memset(sgl, 0, sizeof(*sgl));
2190 }
2191
2192 static void
2193 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2194                      struct mem_array *pmem, struct be_dma_mem *sgl)
2195 {
2196         if (sgl->va)
2197                 be_sgl_destroy_contiguous(sgl);
2198
2199         be_sgl_create_contiguous(pmem->virtual_address,
2200                                  pmem->bus_address.u.a64.address,
2201                                  pmem->size, sgl);
2202 }
2203
2204 static void
2205 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2206                            struct mem_array *pmem, struct be_dma_mem *sgl)
2207 {
2208         if (sgl->va)
2209                 be_sgl_destroy_contiguous(sgl);
2210
2211         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2212                                  pmem->bus_address.u.a64.address,
2213                                  pmem->size, sgl);
2214 }
2215
2216 static int be_fill_queue(struct be_queue_info *q,
2217                 u16 len, u16 entry_size, void *vaddress)
2218 {
2219         struct be_dma_mem *mem = &q->dma_mem;
2220
2221         memset(q, 0, sizeof(*q));
2222         q->len = len;
2223         q->entry_size = entry_size;
2224         mem->size = len * entry_size;
2225         mem->va = vaddress;
2226         if (!mem->va)
2227                 return -ENOMEM;
2228         memset(mem->va, 0, mem->size);
2229         return 0;
2230 }
2231
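     /*
      * beiscsi_create_eqs - allocate and create one event queue per CPU,
      * plus an extra EQ for MCC completions when MSI-X is enabled. EQ memory
      * allocated before a failure is freed on the error path.
      */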
2232 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2233                              struct hwi_context_memory *phwi_context)
2234 {
2235         unsigned int i, num_eq_pages;
2236         int ret, eq_for_mcc;
2237         struct be_queue_info *eq;
2238         struct be_dma_mem *mem;
2239         void *eq_vaddress;
2240         dma_addr_t paddr;
2241
2242         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2243                                       sizeof(struct be_eq_entry));
2244
2245         if (phba->msix_enabled)
2246                 eq_for_mcc = 1;
2247         else
2248                 eq_for_mcc = 0;
2249         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2250                 eq = &phwi_context->be_eq[i].q;
2251                 mem = &eq->dma_mem;
2252                 phwi_context->be_eq[i].phba = phba;
2253                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2254                                                      num_eq_pages * PAGE_SIZE,
2255                                                      &paddr);
2256                 if (!eq_vaddress)
2257                         goto create_eq_error;
2258
2259                 mem->va = eq_vaddress;
2260                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2261                                     sizeof(struct be_eq_entry), eq_vaddress);
2262                 if (ret) {
2263                         shost_printk(KERN_ERR, phba->shost,
2264                                      "be_fill_queue Failed for EQ \n");
2265                         goto create_eq_error;
2266                 }
2267
2268                 mem->dma = paddr;
2269                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2270                                             phwi_context->cur_eqd);
2271                 if (ret) {
2272                         shost_printk(KERN_ERR, phba->shost,
2273                                      "beiscsi_cmd_eq_create "
2274                                      "Failed for EQ\n");
2275                         goto create_eq_error;
2276                 }
2277                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2278         }
2279         return 0;
2280 create_eq_error:
2281         for (i = 0; i < (phba->num_cpus + 1); i++) {
2282                 eq = &phwi_context->be_eq[i].q;
2283                 mem = &eq->dma_mem;
2284                 if (mem->va)
2285                         pci_free_consistent(phba->pcidev, num_eq_pages
2286                                             * PAGE_SIZE,
2287                                             mem->va, mem->dma);
2288         }
2289         return ret;
2290 }
2291
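     /*
      * beiscsi_create_cqs - allocate and create one iSCSI completion queue
      * per CPU, binding each CQ to its event queue object. Previously
      * allocated CQ memory is freed if creation fails.
      */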
2292 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2293                              struct hwi_context_memory *phwi_context)
2294 {
2295         unsigned int i, num_cq_pages;
2296         int ret;
2297         struct be_queue_info *cq, *eq;
2298         struct be_dma_mem *mem;
2299         struct be_eq_obj *pbe_eq;
2300         void *cq_vaddress;
2301         dma_addr_t paddr;
2302
2303         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2304                                       sizeof(struct sol_cqe));
2305
2306         for (i = 0; i < phba->num_cpus; i++) {
2307                 cq = &phwi_context->be_cq[i];
2308                 eq = &phwi_context->be_eq[i].q;
2309                 pbe_eq = &phwi_context->be_eq[i];
2310                 pbe_eq->cq = cq;
2311                 pbe_eq->phba = phba;
2312                 mem = &cq->dma_mem;
2313                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2314                                                      num_cq_pages * PAGE_SIZE,
2315                                                      &paddr);
2316                 if (!cq_vaddress)
2317                         goto create_cq_error;
2318                 ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
2319                                     sizeof(struct sol_cqe), cq_vaddress);
2320                 if (ret) {
2321                         shost_printk(KERN_ERR, phba->shost,
2322                                      "be_fill_queue Failed for ISCSI CQ \n");
2323                         goto create_cq_error;
2324                 }
2325
2326                 mem->dma = paddr;
2327                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2328                                             false, 0);
2329                 if (ret) {
2330                         shost_printk(KERN_ERR, phba->shost,
2331                                      "beiscsi_cmd_cq_create "
2332                                      "Failed for ISCSI CQ\n");
2333                         goto create_cq_error;
2334                 }
2335                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2336                                                  cq->id, eq->id);
2337                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2338         }
2339         return 0;
2340
2341 create_cq_error:
2342         for (i = 0; i < phba->num_cpus; i++) {
2343                 cq = &phwi_context->be_cq[i];
2344                 mem = &cq->dma_mem;
2345                 if (mem->va)
2346                         pci_free_consistent(phba->pcidev, num_cq_pages
2347                                             * PAGE_SIZE,
2348                                             mem->va, mem->dma);
2349         }
2350         return ret;
2351
2352 }
2353
2354 static int
2355 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2356                        struct hwi_context_memory *phwi_context,
2357                        struct hwi_controller *phwi_ctrlr,
2358                        unsigned int def_pdu_ring_sz)
2359 {
2360         unsigned int idx;
2361         int ret;
2362         struct be_queue_info *dq, *cq;
2363         struct be_dma_mem *mem;
2364         struct be_mem_descriptor *mem_descr;
2365         void *dq_vaddress;
2366
2367         idx = 0;
2368         dq = &phwi_context->be_def_hdrq;
2369         cq = &phwi_context->be_cq[0];
2370         mem = &dq->dma_mem;
2371         mem_descr = phba->init_mem;
2372         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2373         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2374         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2375                             sizeof(struct phys_addr),
2376                             sizeof(struct phys_addr), dq_vaddress);
2377         if (ret) {
2378                 shost_printk(KERN_ERR, phba->shost,
2379                              "be_fill_queue Failed for DEF PDU HDR\n");
2380                 return ret;
2381         }
2382         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2383         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2384                                               def_pdu_ring_sz,
2385                                               phba->params.defpdu_hdr_sz);
2386         if (ret) {
2387                 shost_printk(KERN_ERR, phba->shost,
2388                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2389                 return ret;
2390         }
2391         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2392         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2393                  phwi_context->be_def_hdrq.id);
2394         hwi_post_async_buffers(phba, 1);
2395         return 0;
2396 }
2397
2398 static int
2399 beiscsi_create_def_data(struct beiscsi_hba *phba,
2400                         struct hwi_context_memory *phwi_context,
2401                         struct hwi_controller *phwi_ctrlr,
2402                         unsigned int def_pdu_ring_sz)
2403 {
2404         unsigned int idx;
2405         int ret;
2406         struct be_queue_info *dataq, *cq;
2407         struct be_dma_mem *mem;
2408         struct be_mem_descriptor *mem_descr;
2409         void *dq_vaddress;
2410
2411         idx = 0;
2412         dataq = &phwi_context->be_def_dataq;
2413         cq = &phwi_context->be_cq[0];
2414         mem = &dataq->dma_mem;
2415         mem_descr = phba->init_mem;
2416         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2417         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2418         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2419                             sizeof(struct phys_addr),
2420                             sizeof(struct phys_addr), dq_vaddress);
2421         if (ret) {
2422                 shost_printk(KERN_ERR, phba->shost,
2423                              "be_fill_queue Failed for DEF PDU DATA\n");
2424                 return ret;
2425         }
2426         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2427         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2428                                               def_pdu_ring_sz,
2429                                               phba->params.defpdu_data_sz);
2430         if (ret) {
2431                 shost_printk(KERN_ERR, phba->shost,
2432                              "be_cmd_create_default_pdu_queue Failed"
2433                              " for DEF PDU DATA\n");
2434                 return ret;
2435         }
2436         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2437         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2438                  phwi_context->be_def_dataq.id);
2439         hwi_post_async_buffers(phba, 0);
2440         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2441         return 0;
2442 }
2443
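     /*
      * beiscsi_post_pages - post the SGE pages (HWI_MEM_SGE) to the adapter,
      * fragment by fragment, starting at the page offset that corresponds to
      * the firmware's first ICD index.
      */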
2444 static int
2445 beiscsi_post_pages(struct beiscsi_hba *phba)
2446 {
2447         struct be_mem_descriptor *mem_descr;
2448         struct mem_array *pm_arr;
2449         unsigned int page_offset, i;
2450         struct be_dma_mem sgl;
2451         int status;
2452
2453         mem_descr = phba->init_mem;
2454         mem_descr += HWI_MEM_SGE;
2455         pm_arr = mem_descr->mem_array;
2456
2457         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2458                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2459         for (i = 0; i < mem_descr->num_elements; i++) {
2460                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2461                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2462                                                 page_offset,
2463                                                 (pm_arr->size / PAGE_SIZE));
2464                 page_offset += pm_arr->size / PAGE_SIZE;
2465                 if (status != 0) {
2466                         shost_printk(KERN_ERR, phba->shost,
2467                                      "post sgl failed.\n");
2468                         return status;
2469                 }
2470                 pm_arr++;
2471         }
2472         SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2473         return 0;
2474 }
2475
2476 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2477 {
2478         struct be_dma_mem *mem = &q->dma_mem;
2479         if (mem->va)
2480                 pci_free_consistent(phba->pcidev, mem->size,
2481                         mem->va, mem->dma);
2482 }
2483
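/*
 * Allocate and zero a DMA-coherent ring of @len entries of @entry_size
 * bytes each for @q. Returns 0 on success or a negative error if the
 * allocation fails.
 */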
2484 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2485                 u16 len, u16 entry_size)
2486 {
2487         struct be_dma_mem *mem = &q->dma_mem;
2488
2489         memset(q, 0, sizeof(*q));
2490         q->len = len;
2491         q->entry_size = entry_size;
2492         mem->size = len * entry_size;
2493         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2494         if (!mem->va)
2495                 return -ENOMEM;
2496         memset(mem->va, 0, mem->size);
2497         return 0;
2498 }
2499
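/**
 * beiscsi_create_wrb_rings - create one WRB queue per connection
 * @phba: driver priv structure for the adapter
 * @phwi_context: hardware context holding the WRB queue objects
 * @phwi_ctrlr: hardware controller bookkeeping
 *
 * Carves the HWI_MEM_WRB region into per-connection rings of
 * wrbs_per_cxn WRBs, issues be_cmd_wrbq_create() for each ring and
 * records the returned queue id as the connection's cid.
 */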
2500 static int
2501 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2502                          struct hwi_context_memory *phwi_context,
2503                          struct hwi_controller *phwi_ctrlr)
2504 {
2505         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2506         u64 pa_addr_lo;
2507         unsigned int idx, num, i;
2508         struct mem_array *pwrb_arr;
2509         void *wrb_vaddr;
2510         struct be_dma_mem sgl;
2511         struct be_mem_descriptor *mem_descr;
2512         int status;
2513
2514         idx = 0;
2515         mem_descr = phba->init_mem;
2516         mem_descr += HWI_MEM_WRB;
2517         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2518                            GFP_KERNEL);
2519         if (!pwrb_arr) {
2520                 shost_printk(KERN_ERR, phba->shost,
2521                              "Memory alloc failed in create wrb ring.\n");
2522                 return -ENOMEM;
2523         }
2524         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2525         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2526         num_wrb_rings = mem_descr->mem_array[idx].size /
2527                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2528
2529         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2530                 if (num_wrb_rings) {
2531                         pwrb_arr[num].virtual_address = wrb_vaddr;
2532                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2533                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2534                                             sizeof(struct iscsi_wrb);
2535                         wrb_vaddr += pwrb_arr[num].size;
2536                         pa_addr_lo += pwrb_arr[num].size;
2537                         num_wrb_rings--;
2538                 } else {
2539                         idx++;
2540                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2541                         pa_addr_lo = mem_descr->mem_array[idx].
2542                                         bus_address.u.a64.address;
2543                         num_wrb_rings = mem_descr->mem_array[idx].size /
2544                                         (phba->params.wrbs_per_cxn *
2545                                         sizeof(struct iscsi_wrb));
2546                         pwrb_arr[num].virtual_address = wrb_vaddr;
2547                         pwrb_arr[num].bus_address.u.a64.address =
2548                                                 pa_addr_lo;
2549                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2550                                                 sizeof(struct iscsi_wrb);
2551                         wrb_vaddr += pwrb_arr[num].size;
2552                         pa_addr_lo += pwrb_arr[num].size;
2553                         num_wrb_rings--;
2554                 }
2555         }
2556         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2557                 wrb_mem_index = 0;
2558                 offset = 0;
2559                 size = 0;
2560
2561                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2562                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2563                                             &phwi_context->be_wrbq[i]);
2564                 if (status != 0) {
2565                         shost_printk(KERN_ERR, phba->shost,
2566                                      "wrbq create failed.\n");
                             kfree(pwrb_arr);
2567                         return status;
2568                 }
2569                 phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id;
2570         }
2571         kfree(pwrb_arr);
2572         return 0;
2573 }
2574
2575 static void free_wrb_handles(struct beiscsi_hba *phba)
2576 {
2577         unsigned int index;
2578         struct hwi_controller *phwi_ctrlr;
2579         struct hwi_wrb_context *pwrb_context;
2580
2581         phwi_ctrlr = phba->phwi_ctrlr;
2582         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2583                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2584                 kfree(pwrb_context->pwrb_handle_base);
2585                 kfree(pwrb_context->pwrb_handle_basestd);
2586         }
2587 }
2588
2589 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2590 {
2591         struct be_queue_info *q;
2592         struct be_ctrl_info *ctrl = &phba->ctrl;
2593
2594         q = &phba->ctrl.mcc_obj.q;
2595         if (q->created)
2596                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2597         be_queue_free(phba, q);
2598
2599         q = &phba->ctrl.mcc_obj.cq;
2600         if (q->created)
2601                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2602         be_queue_free(phba, q);
2603 }
2604
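/**
 * hwi_cleanup - tear down all rings created by hwi_init_port
 * @phba: driver priv structure for the adapter
 *
 * Destroys the per-connection WRB queues, the default PDU header and
 * data queues, the posted SGL pages, the completion queues, every
 * event queue (including the extra one used when MSIX is enabled) and
 * finally the MCC queues.
 */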
2605 static void hwi_cleanup(struct beiscsi_hba *phba)
2606 {
2607         struct be_queue_info *q;
2608         struct be_ctrl_info *ctrl = &phba->ctrl;
2609         struct hwi_controller *phwi_ctrlr;
2610         struct hwi_context_memory *phwi_context;
2611         int i, eq_num;
2612
2613         phwi_ctrlr = phba->phwi_ctrlr;
2614         phwi_context = phwi_ctrlr->phwi_ctxt;
2615         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2616                 q = &phwi_context->be_wrbq[i];
2617                 if (q->created)
2618                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2619         }
2620         free_wrb_handles(phba);
2621
2622         q = &phwi_context->be_def_hdrq;
2623         if (q->created)
2624                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2625
2626         q = &phwi_context->be_def_dataq;
2627         if (q->created)
2628                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2629
2630         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2631
2632         for (i = 0; i < (phba->num_cpus); i++) {
2633                 q = &phwi_context->be_cq[i];
2634                 if (q->created)
2635                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2636         }
2637         if (phba->msix_enabled)
2638                 eq_num = 1;
2639         else
2640                 eq_num = 0;
2641         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2642                 q = &phwi_context->be_eq[i].q;
2643                 if (q->created)
2644                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2645         }
2646         be_mcc_queues_destroy(phba);
2647 }
2648
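/**
 * be_mcc_queues_create - allocate and create the MCC queue pair
 * @phba: driver priv structure for the adapter
 * @phwi_context: hardware context holding the event queues
 *
 * Creates the MCC completion queue on the last event queue when MSIX
 * is enabled (or on event queue 0 otherwise) and then creates the MCC
 * work queue on top of it.
 */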
2649 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2650                                 struct hwi_context_memory *phwi_context)
2651 {
2652         struct be_queue_info *q, *cq;
2653         struct be_ctrl_info *ctrl = &phba->ctrl;
2654
2655         /* Alloc MCC compl queue */
2656         cq = &phba->ctrl.mcc_obj.cq;
2657         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2658                         sizeof(struct be_mcc_compl)))
2659                 goto err;
2660         /* Ask BE to create MCC compl queue; */
2661         if (phba->msix_enabled) {
2662                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2663                                          [phba->num_cpus].q, false, true, 0))
2664                         goto mcc_cq_free;
2665         } else {
2666                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2667                                           false, true, 0))
2668                         goto mcc_cq_free;
2669         }
2670
2671         /* Alloc MCC queue */
2672         q = &phba->ctrl.mcc_obj.q;
2673         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2674                 goto mcc_cq_destroy;
2675
2676         /* Ask BE to create MCC queue */
2677         if (be_cmd_mccq_create(phba, q, cq))
2678                 goto mcc_q_free;
2679
2680         return 0;
2681
2682 mcc_q_free:
2683         be_queue_free(phba, q);
2684 mcc_cq_destroy:
2685         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2686 mcc_cq_free:
2687         be_queue_free(phba, cq);
2688 err:
2689         return -ENOMEM;
2690 }
2691
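/*
 * Cap the number of I/O event queues at MAX_CPUS - 1 so that the extra
 * event queue reserved for the MCC under MSIX still fits within
 * MAX_CPUS entries.
 */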
2692 static int find_num_cpus(void)
2693 {
2694         int  num_cpus = 0;
2695
2696         num_cpus = num_online_cpus();
2697         if (num_cpus >= MAX_CPUS)
2698                 num_cpus = MAX_CPUS - 1;
2699
2700         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2701         return num_cpus;
2702 }
2703
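/**
 * hwi_init_port - bring up all adapter rings
 * @phba: driver priv structure for the adapter
 *
 * Issues the firmware init command, creates the event and MCC queues,
 * validates the firmware version and configuration, and then creates
 * the completion queues, the default PDU header/data rings, the posted
 * SGL pages and the per-connection WRB rings. On any failure whatever
 * has been created so far is torn down through hwi_cleanup().
 */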
2704 static int hwi_init_port(struct beiscsi_hba *phba)
2705 {
2706         struct hwi_controller *phwi_ctrlr;
2707         struct hwi_context_memory *phwi_context;
2708         unsigned int def_pdu_ring_sz;
2709         struct be_ctrl_info *ctrl = &phba->ctrl;
2710         int status;
2711
2712         def_pdu_ring_sz =
2713                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2714         phwi_ctrlr = phba->phwi_ctrlr;
2715         phwi_context = phwi_ctrlr->phwi_ctxt;
2716         phwi_context->max_eqd = 0;
2717         phwi_context->min_eqd = 0;
2718         phwi_context->cur_eqd = 64;
2719         be_cmd_fw_initialize(&phba->ctrl);
2720
2721         status = beiscsi_create_eqs(phba, phwi_context);
2722         if (status != 0) {
2723                 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2724                 goto error;
2725         }
2726
2727         status = be_mcc_queues_create(phba, phwi_context);
2728         if (status != 0)
2729                 goto error;
2730
2731         status = mgmt_check_supported_fw(ctrl, phba);
2732         if (status != 0) {
2733                 shost_printk(KERN_ERR, phba->shost,
2734                              "Unsupported fw version \n");
2735                 goto error;
2736         }
2737
2738         status = mgmt_get_fw_config(ctrl, phba);
2739         if (status != 0) {
2740                 shost_printk(KERN_ERR, phba->shost,
2741                              "Error getting fw config\n");
2742                 goto error;
2743         }
2744
2745         status = beiscsi_create_cqs(phba, phwi_context);
2746         if (status != 0) {
2747                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2748                 goto error;
2749         }
2750
2751         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2752                                         def_pdu_ring_sz);
2753         if (status != 0) {
2754                 shost_printk(KERN_ERR, phba->shost,
2755                              "Default Header not created\n");
2756                 goto error;
2757         }
2758
2759         status = beiscsi_create_def_data(phba, phwi_context,
2760                                          phwi_ctrlr, def_pdu_ring_sz);
2761         if (status != 0) {
2762                 shost_printk(KERN_ERR, phba->shost,
2763                              "Default Data not created\n");
2764                 goto error;
2765         }
2766
2767         status = beiscsi_post_pages(phba);
2768         if (status != 0) {
2769                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2770                 goto error;
2771         }
2772
2773         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2774         if (status != 0) {
2775                 shost_printk(KERN_ERR, phba->shost,
2776                              "WRB Rings not created\n");
2777                 goto error;
2778         }
2779
2780         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2781         return 0;
2782
2783 error:
2784         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2785         hwi_cleanup(phba);
2786         return -ENOMEM;
2787 }
2788
2789 static int hwi_init_controller(struct beiscsi_hba *phba)
2790 {
2791         struct hwi_controller *phwi_ctrlr;
2792
2793         phwi_ctrlr = phba->phwi_ctrlr;
2794         if (phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements == 1) {
2795                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2796                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2797                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2798                          phwi_ctrlr->phwi_ctxt);
2799         } else {
2800                 shost_printk(KERN_ERR, phba->shost,
2801                              "HWI_MEM_ADDN_CONTEXT has more than one "
2802                              "element. Failing to load\n");
2803                 return -ENOMEM;
2804         }
2805
2806         iscsi_init_global_templates(phba);
2807         beiscsi_init_wrb_handle(phba);
2808         hwi_init_async_pdu_ctx(phba);
2809         if (hwi_init_port(phba) != 0) {
2810                 shost_printk(KERN_ERR, phba->shost,
2811                              "hwi_init_controller failed\n");
2812                 return -ENOMEM;
2813         }
2814         return 0;
2815 }
2816
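/**
 * beiscsi_free_mem - release all memory claimed by beiscsi_get_memory
 * @phba: driver priv structure for the adapter
 *
 * Frees every DMA-coherent chunk tracked in phba->init_mem along with
 * the per-descriptor arrays, the init_mem table itself and the hwi
 * controller structure.
 */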
2817 static void beiscsi_free_mem(struct beiscsi_hba *phba)
2818 {
2819         struct be_mem_descriptor *mem_descr;
2820         int i, j;
2821
2822         mem_descr = phba->init_mem;
2823         i = 0;
2824         j = 0;
2825         for (i = 0; i < SE_MEM_MAX; i++) {
2826                 for (j = mem_descr->num_elements; j > 0; j--) {
2827                         pci_free_consistent(phba->pcidev,
2828                           mem_descr->mem_array[j - 1].size,
2829                           mem_descr->mem_array[j - 1].virtual_address,
2830                           mem_descr->mem_array[j - 1].bus_address.
2831                                 u.a64.address);
2832                 }
2833                 kfree(mem_descr->mem_array);
2834                 mem_descr++;
2835         }
2836         kfree(phba->init_mem);
2837         kfree(phba->phwi_ctrlr);
2838 }
2839
2840 static int beiscsi_init_controller(struct beiscsi_hba *phba)
2841 {
2842         int ret = -ENOMEM;
2843
2844         ret = beiscsi_get_memory(phba);
2845         if (ret < 0) {
2846                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
2847                              "Failed in beiscsi_get_memory\n");
2848                 return ret;
2849         }
2850
2851         ret = hwi_init_controller(phba);
2852         if (ret)
2853                 goto free_init;
2854         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
2855         return 0;
2856
2857 free_init:
2858         beiscsi_free_mem(phba);
2859         return -ENOMEM;
2860 }
2861
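/**
 * beiscsi_init_sgl_handle - build the I/O and eh SGL handle pools
 * @phba: driver priv structure for the adapter
 *
 * Allocates pointer arrays for ios_per_ctrl I/O handles and for the
 * remaining (icds_per_ctrl - ios_per_ctrl) eh handles, then walks the
 * HWI_MEM_SGLH and HWI_MEM_SGE regions to attach an SGE fragment and
 * sgl_index to each handle.
 */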
2862 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2863 {
2864         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2865         struct sgl_handle *psgl_handle;
2866         struct iscsi_sge *pfrag;
2867         unsigned int arr_index, i, idx;
2868
2869         phba->io_sgl_hndl_avbl = 0;
2870         phba->eh_sgl_hndl_avbl = 0;
2871
2872         mem_descr_sglh = phba->init_mem;
2873         mem_descr_sglh += HWI_MEM_SGLH;
2874         if (mem_descr_sglh->num_elements == 1) {
2875                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2876                                                  phba->params.ios_per_ctrl,
2877                                                  GFP_KERNEL);
2878                 if (!phba->io_sgl_hndl_base) {
2879                         shost_printk(KERN_ERR, phba->shost,
2880                                      "Mem Alloc Failed. Failing to load\n");
2881                         return -ENOMEM;
2882                 }
2883                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2884                                                  (phba->params.icds_per_ctrl -
2885                                                  phba->params.ios_per_ctrl),
2886                                                  GFP_KERNEL);
2887                 if (!phba->eh_sgl_hndl_base) {
2888                         kfree(phba->io_sgl_hndl_base);
2889                         shost_printk(KERN_ERR, phba->shost,
2890                                      "Mem Alloc Failed. Failing to load\n");
2891                         return -ENOMEM;
2892                 }
2893         } else {
2894                 shost_printk(KERN_ERR, phba->shost,
2895                              "HWI_MEM_SGLH has more than one "
2896                              "element. Failing to load\n");
2897                 return -ENOMEM;
2898         }
2899
2900         arr_index = 0;
2901         idx = 0;
2902         while (idx < mem_descr_sglh->num_elements) {
2903                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
2904
2905                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
2906                       sizeof(struct sgl_handle)); i++) {
2907                         if (arr_index < phba->params.ios_per_ctrl) {
2908                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
2909                                 phba->io_sgl_hndl_avbl++;
2910                                 arr_index++;
2911                         } else {
2912                                 phba->eh_sgl_hndl_base[arr_index -
2913                                         phba->params.ios_per_ctrl] =
2914                                                                 psgl_handle;
2915                                 arr_index++;
2916                                 phba->eh_sgl_hndl_avbl++;
2917                         }
2918                         psgl_handle++;
2919                 }
2920                 idx++;
2921         }
2922         SE_DEBUG(DBG_LVL_8,
2923                  "phba->io_sgl_hndl_avbl=%d "
2924                  "phba->eh_sgl_hndl_avbl=%d\n",
2925                  phba->io_sgl_hndl_avbl,
2926                  phba->eh_sgl_hndl_avbl);
2927         mem_descr_sg = phba->init_mem;
2928         mem_descr_sg += HWI_MEM_SGE;
2929         SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
2930                  mem_descr_sg->num_elements);
2931         arr_index = 0;
2932         idx = 0;
2933         while (idx < mem_descr_sg->num_elements) {
2934                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
2935
2936                 for (i = 0;
2937                      i < (mem_descr_sg->mem_array[idx].size) /
2938                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
2939                      i++) {
2940                         if (arr_index < phba->params.ios_per_ctrl)
2941                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
2942                         else
2943                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
2944                                                 phba->params.ios_per_ctrl];
2945                         psgl_handle->pfrag = pfrag;
2946                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
2947                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
2948                         pfrag += phba->params.num_sge_per_io;
2949                         psgl_handle->sgl_index =
2950                                 phba->fw_config.iscsi_cid_start + arr_index++;
2951                 }
2952                 idx++;
2953         }
2954         phba->io_sgl_free_index = 0;
2955         phba->io_sgl_alloc_index = 0;
2956         phba->eh_sgl_free_index = 0;
2957         phba->eh_sgl_alloc_index = 0;
2958         return 0;
2959 }
2960
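/**
 * hba_setup_cid_tbls - allocate the connection id and endpoint tables
 * @phba: driver priv structure for the adapter
 *
 * Builds the free-cid array (one entry per connection, cids spaced two
 * apart) and the endpoint lookup array sized for twice the number of
 * connections, and marks all cids as available.
 */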
2961 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
2962 {
2963         int i, new_cid;
2964
2965         phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
2966                                   GFP_KERNEL);
2967         if (!phba->cid_array) {
2968                 shost_printk(KERN_ERR, phba->shost,
2969                              "Failed to allocate memory in "
2970                              "hba_setup_cid_tbls\n");
2971                 return -ENOMEM;
2972         }
2973         phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) *
2974                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
2975         if (!phba->ep_array) {
2976                 shost_printk(KERN_ERR, phba->shost,
2977                              "Failed to allocate memory in "
2978                              "hba_setup_cid_tbls \n");
2979                 kfree(phba->cid_array);
2980                 return -ENOMEM;
2981         }
2982         new_cid = phba->fw_config.iscsi_icd_start;
2983         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2984                 phba->cid_array[i] = new_cid;
2985                 new_cid += 2;
2986         }
2987         phba->avlbl_cids = phba->params.cxns_per_ctrl;
2988         return 0;
2989 }
2990
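/**
 * hwi_enable_intr - enable host interrupts and arm the event queues
 * @phba: driver priv structure for the adapter
 *
 * Sets the host interrupt enable bit in the membar control register if
 * it is not already set and rings the doorbell of each event queue to
 * arm it. Always returns true.
 */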
2991 static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
2992 {
2993         struct be_ctrl_info *ctrl = &phba->ctrl;
2994         struct hwi_controller *phwi_ctrlr;
2995         struct hwi_context_memory *phwi_context;
2996         struct be_queue_info *eq;
2997         u8 __iomem *addr;
2998         u32 reg, i;
2999         u32 enabled;
3000
3001         phwi_ctrlr = phba->phwi_ctrlr;
3002         phwi_context = phwi_ctrlr->phwi_ctxt;
3003
3004         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3005                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3006         reg = ioread32(addr);
3007         SE_DEBUG(DBG_LVL_8, "reg = 0x%08x\n", reg);
3008
3009         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3010         if (!enabled) {
3011                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3012                 SE_DEBUG(DBG_LVL_8, "reg = 0x%08x addr=%p\n", reg, addr);
3013                 iowrite32(reg, addr);
3014                 for (i = 0; i <= phba->num_cpus; i++) {
3015                         eq = &phwi_context->be_eq[i].q;
3016                         SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3017                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3018                 }
3019         } else
3020                 shost_printk(KERN_WARNING, phba->shost,
3021                              "In hwi_enable_intr, Not Enabled \n");
3022         return true;
3023 }
3024
3025 static void hwi_disable_intr(struct beiscsi_hba *phba)
3026 {
3027         struct be_ctrl_info *ctrl = &phba->ctrl;
3028
3029         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3030         u32 reg = ioread32(addr);
3031
3032         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3033         if (enabled) {
3034                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3035                 iowrite32(reg, addr);
3036         } else
3037                 shost_printk(KERN_WARNING, phba->shost,
3038                              "In hwi_disable_intr, Already Disabled \n");
3039 }
3040
3041 static int beiscsi_init_port(struct beiscsi_hba *phba)
3042 {
3043         int ret;
3044
3045         ret = beiscsi_init_controller(phba);
3046         if (ret < 0) {
3047                 shost_printk(KERN_ERR, phba->shost,
3048                              "beiscsi_dev_probe - Failed in "
3049                              "beiscsi_init_controller\n");
3050                 return ret;
3051         }
3052         ret = beiscsi_init_sgl_handle(phba);
3053         if (ret < 0) {
3054                 shost_printk(KERN_ERR, phba->shost,
3055                              "beiscsi_dev_probe - Failed in "
3056                              "beiscsi_init_sgl_handle\n");
3057                 goto do_cleanup_ctrlr;
3058         }
3059         ret = hba_setup_cid_tbls(phba);
3060         if (ret) {
3061                 shost_printk(KERN_ERR, phba->shost,
3062                              "Failed in hba_setup_cid_tbls\n");
3063                 kfree(phba->io_sgl_hndl_base);
3064                 kfree(phba->eh_sgl_hndl_base);
3065                 goto do_cleanup_ctrlr;
3066         }
3067
3068         return ret;
3069
3070 do_cleanup_ctrlr:
3071         hwi_cleanup(phba);
3072         return ret;
3073 }
3074
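/*
 * Walk every event queue and clear the valid bit of any outstanding
 * entries, advancing the tail past them so stale events are discarded.
 */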
3075 static void hwi_purge_eq(struct beiscsi_hba *phba)
3076 {
3077         struct hwi_controller *phwi_ctrlr;
3078         struct hwi_context_memory *phwi_context;
3079         struct be_queue_info *eq;
3080         struct be_eq_entry *eqe = NULL;
3081         int i, eq_msix;
3082
3083         phwi_ctrlr = phba->phwi_ctrlr;
3084         phwi_context = phwi_ctrlr->phwi_ctxt;
3085         if (phba->msix_enabled)
3086                 eq_msix = 1;
3087         else
3088                 eq_msix = 0;
3089
3090         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3091                 eq = &phwi_context->be_eq[i].q;
3092                 eqe = queue_tail_node(eq);
3093
3094                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3095                                         & EQE_VALID_MASK) {
3096                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3097                         queue_tail_inc(eq);
3098                         eqe = queue_tail_node(eq);
3099                 }
3100         }
3101 }
3102
3103 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3104 {
3105         unsigned char mgmt_status;
3106
3107         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3108         if (mgmt_status)
3109                 shost_printk(KERN_WARNING, phba->shost,
3110                              "mgmt_epfw_cleanup FAILED \n");
3111         hwi_cleanup(phba);
3112         hwi_purge_eq(phba);
3113         kfree(phba->io_sgl_hndl_base);
3114         kfree(phba->eh_sgl_hndl_base);
3115         kfree(phba->cid_array);
3116         kfree(phba->ep_array);
3117 }
3118
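/**
 * beiscsi_offload_connection - push negotiated parameters to the chip
 * @beiscsi_conn: driver connection context
 * @params: negotiated offload parameters for this connection
 *
 * Builds a target-context-update WRB on the reserved login WRB slot,
 * copies the negotiated limits (burst lengths, ERL, digest and R2T
 * flags, exp_statsn) into it and rings the TX doorbell to post it.
 */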
3119 void
3120 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3121                            struct beiscsi_offload_params *params)
3122 {
3123         struct wrb_handle *pwrb_handle;
3124         struct iscsi_target_context_update_wrb *pwrb = NULL;
3125         struct be_mem_descriptor *mem_descr;
3126         struct beiscsi_hba *phba = beiscsi_conn->phba;
3127         u32 doorbell = 0;
3128
3129         /*
3130          * We can always use 0 here because it is reserved by libiscsi for
3131          * login/startup related tasks.
3132          */
3133         pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0);
3134         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3135         memset(pwrb, 0, sizeof(*pwrb));
3136         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3137                       max_burst_length, pwrb, params->dw[offsetof
3138                       (struct amap_beiscsi_offload_params,
3139                       max_burst_length) / 32]);
3140         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3141                       max_send_data_segment_length, pwrb,
3142                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3143                       max_send_data_segment_length) / 32]);
3144         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3145                       first_burst_length,
3146                       pwrb,
3147                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3148                       first_burst_length) / 32]);
3149
3150         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3151                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3152                       erl) / 32] & OFFLD_PARAMS_ERL));
3153         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3154                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3155                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3156         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3157                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3158                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3159         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3160                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3161                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3162         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3163                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3164                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3165         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3166                       pwrb,
3167                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3168                       exp_statsn) / 32] + 1));
3169         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3170                       0x7);
3171         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3172                       pwrb, pwrb_handle->wrb_index);
3173         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3174                       pwrb, pwrb_handle->nxt_wrb_index);
3175         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3176                         session_state, pwrb, 0);
3177         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3178                       pwrb, 1);
3179         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3180                       pwrb, 0);
3181         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3182                       0);
3183
3184         mem_descr = phba->init_mem;
3185         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3186
3187         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3188                         pad_buffer_addr_hi, pwrb,
3189                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3190         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3191                         pad_buffer_addr_lo, pwrb,
3192                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3193
3194         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3195
3196         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3197         doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3198                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3199         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3200
3201         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3202 }
3203
3204 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3205                               int *index, int *age)
3206 {
3207         *index = (int)itt;
3208         if (age)
3209                 *age = conn->session->age;
3210 }
3211
3212 /**
3213  * beiscsi_alloc_pdu - allocates pdu and related resources
3214  * @task: libiscsi task
3215  * @opcode: opcode of pdu for task
3216  *
3217  * This is called with the session lock held. It will allocate
3218  * the wrb and sgl if needed for the command. And it will prep
3219  * the pdu's itt. beiscsi_parse_pdu will later translate
3220  * the pdu itt to the libiscsi task itt.
3221  */
3222 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3223 {
3224         struct beiscsi_io_task *io_task = task->dd_data;
3225         struct iscsi_conn *conn = task->conn;
3226         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3227         struct beiscsi_hba *phba = beiscsi_conn->phba;
3228         struct hwi_wrb_context *pwrb_context;
3229         struct hwi_controller *phwi_ctrlr;
3230         itt_t itt;
3231         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3232         dma_addr_t paddr;
3233
3234         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3235                                           GFP_KERNEL, &paddr);
3236         if (!io_task->cmd_bhs)
3237                 return -ENOMEM;
3238         io_task->bhs_pa.u.a64.address = paddr;
3239         io_task->libiscsi_itt = (itt_t)task->itt;
3240         io_task->pwrb_handle = alloc_wrb_handle(phba,
3241                                                 beiscsi_conn->beiscsi_conn_cid,
3242                                                 task->itt);
3243         io_task->conn = beiscsi_conn;
3244
3245         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3246         task->hdr_max = sizeof(struct be_cmd_bhs);
3247
3248         if (task->sc) {
3249                 spin_lock(&phba->io_sgl_lock);
3250                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3251                 spin_unlock(&phba->io_sgl_lock);
3252                 if (!io_task->psgl_handle)
3253                         goto free_hndls;
3254         } else {
3255                 io_task->scsi_cmnd = NULL;
3256                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3257                         if (!beiscsi_conn->login_in_progress) {
3258                                 spin_lock(&phba->mgmt_sgl_lock);
3259                                 io_task->psgl_handle = (struct sgl_handle *)
3260                                                 alloc_mgmt_sgl_handle(phba);
3261                                 spin_unlock(&phba->mgmt_sgl_lock);
3262                                 if (!io_task->psgl_handle)
3263                                         goto free_hndls;
3264
3265                                 beiscsi_conn->login_in_progress = 1;
3266                                 beiscsi_conn->plogin_sgl_handle =
3267                                                         io_task->psgl_handle;
3268                         } else {
3269                                 io_task->psgl_handle =
3270                                                 beiscsi_conn->plogin_sgl_handle;
3271                         }
3272                 } else {
3273                         spin_lock(&phba->mgmt_sgl_lock);
3274                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3275                         spin_unlock(&phba->mgmt_sgl_lock);
3276                         if (!io_task->psgl_handle)
3277                                 goto free_hndls;
3278                 }
3279         }
3280         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3281                                  wrb_index << 16) | (unsigned int)
3282                                 (io_task->psgl_handle->sgl_index));
3283         io_task->pwrb_handle->pio_handle = task;
3284
3285         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3286         return 0;
3287
3288 free_hndls:
3289         phwi_ctrlr = phba->phwi_ctrlr;
3290         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
3291         free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3292         io_task->pwrb_handle = NULL;
3293         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3294                       io_task->bhs_pa.u.a64.address);
3295         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3296         return -ENOMEM;
3297 }
3298
3299 static void beiscsi_cleanup_task(struct iscsi_task *task)
3300 {
3301         struct beiscsi_io_task *io_task = task->dd_data;
3302         struct iscsi_conn *conn = task->conn;
3303         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3304         struct beiscsi_hba *phba = beiscsi_conn->phba;
3305         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3306         struct hwi_wrb_context *pwrb_context;
3307         struct hwi_controller *phwi_ctrlr;
3308
3309         phwi_ctrlr = phba->phwi_ctrlr;
3310         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
3311         if (io_task->pwrb_handle) {
3312                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3313                 io_task->pwrb_handle = NULL;
3314         }
3315
3316         if (io_task->cmd_bhs) {
3317                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3318                               io_task->bhs_pa.u.a64.address);
3319         }
3320
3321         if (task->sc) {
3322                 if (io_task->psgl_handle) {
3323                         spin_lock(&phba->io_sgl_lock);
3324                         free_io_sgl_handle(phba, io_task->psgl_handle);
3325                         spin_unlock(&phba->io_sgl_lock);
3326                         io_task->psgl_handle = NULL;
3327                 }
3328         } else {
3329                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3330                         return;
3331                 if (io_task->psgl_handle) {
3332                         spin_lock(&phba->mgmt_sgl_lock);
3333                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3334                         spin_unlock(&phba->mgmt_sgl_lock);
3335                         io_task->psgl_handle = NULL;
3336                 }
3337         }
3338 }
3339
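/**
 * beiscsi_iotask - post a SCSI command WRB
 * @task: libiscsi task carrying the SCSI command
 * @sg: scatterlist of the data buffer
 * @num_sg: number of mapped scatterlist entries
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write (DATA-OUT) command
 *
 * Fills the task's WRB (and the embedded Data-Out header for writes),
 * attaches the SGL and rings the TX doorbell to submit the command.
 */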
3340 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3341                           unsigned int num_sg, unsigned int xferlen,
3342                           unsigned int writedir)
3343 {
3344
3345         struct beiscsi_io_task *io_task = task->dd_data;
3346         struct iscsi_conn *conn = task->conn;
3347         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3348         struct beiscsi_hba *phba = beiscsi_conn->phba;
3349         struct iscsi_wrb *pwrb = NULL;
3350         unsigned int doorbell = 0;
3351
3352         pwrb = io_task->pwrb_handle->pwrb;
3353         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3354         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3355
3356         if (writedir) {
3357                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3358                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3359                               &io_task->cmd_bhs->iscsi_data_pdu,
3360                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3361                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3362                               &io_task->cmd_bhs->iscsi_data_pdu,
3363                               ISCSI_OPCODE_SCSI_DATA_OUT);
3364                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3365                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3366                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3367                                       INI_WR_CMD);
3368                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3369         } else {
3370                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3371                                       INI_RD_CMD);
3372                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3373         }
3374         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3375                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3376                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3377
3378         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3379                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3380                                   lun[0]));
3381         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3382         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3383                       io_task->pwrb_handle->wrb_index);
3384         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3385                       be32_to_cpu(task->cmdsn));
3386         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3387                       io_task->psgl_handle->sgl_index);
3388
3389         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3390
3391         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3392                       io_task->pwrb_handle->nxt_wrb_index);
3393         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3394
3395         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3396         doorbell |= (io_task->pwrb_handle->wrb_index &
3397                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3398         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3399
3400         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3401         return 0;
3402 }
3403
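/**
 * beiscsi_mtask - post a management PDU WRB
 * @task: libiscsi task (login, nop-out, text, TMF or logout)
 *
 * Sets the WRB type according to the opcode; for a task-management
 * function the referenced command's ICD is invalidated through
 * mgmt_invalidate_icds() before the TMF WRB is posted.
 */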
3404 static int beiscsi_mtask(struct iscsi_task *task)
3405 {
3406         struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
3407         struct iscsi_conn *conn = task->conn;
3408         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3409         struct beiscsi_hba *phba = beiscsi_conn->phba;
3410         struct iscsi_session *session;
3411         struct iscsi_wrb *pwrb = NULL;
3412         struct hwi_controller *phwi_ctrlr;
3413         struct hwi_wrb_context *pwrb_context;
3414         struct wrb_handle *pwrb_handle;
3415         unsigned int doorbell = 0;
3416         unsigned int i, cid;
3417         struct iscsi_task *aborted_task;
3418
3419         cid = beiscsi_conn->beiscsi_conn_cid;
3420         pwrb = io_task->pwrb_handle->pwrb;
3421         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3422                       be32_to_cpu(task->cmdsn));
3423         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3424                       io_task->pwrb_handle->wrb_index);
3425         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3426                       io_task->psgl_handle->sgl_index);
3427
3428         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3429         case ISCSI_OP_LOGIN:
3430                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3431                                       TGT_DM_CMD);
3432                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3433                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3434                 hwi_write_buffer(pwrb, task);
3435                 break;
3436         case ISCSI_OP_NOOP_OUT:
3437                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3438                                       INI_RD_CMD);
3439                 hwi_write_buffer(pwrb, task);
3440                 break;
3441         case ISCSI_OP_TEXT:
3442                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3443                                       INI_WR_CMD);
3444                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3445                 hwi_write_buffer(pwrb, task);
3446                 break;
3447         case ISCSI_OP_SCSI_TMFUNC:
3448                 session = conn->session;
3449                 i = ((struct iscsi_tm *)task->hdr)->rtt;
3450                 phwi_ctrlr = phba->phwi_ctrlr;
3451                 pwrb_context = &phwi_ctrlr->wrb_context[cid];
3452                 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3453                                                                 >> 16];
3454                 aborted_task = pwrb_handle->pio_handle;
3455                 if (!aborted_task)
3456                         return 0;
3457
3458                 aborted_io_task = aborted_task->dd_data;
3459                 if (!aborted_io_task->scsi_cmnd)
3460                         return 0;
3461
3462                 mgmt_invalidate_icds(phba,
3463                                      aborted_io_task->psgl_handle->sgl_index,
3464                                      cid);
3465                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3466                                       INI_TMF_CMD);
3467                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3468                 hwi_write_buffer(pwrb, task);
3469                 break;
3470         case ISCSI_OP_LOGOUT:
3471                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3472                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3473                                 HWH_TYPE_LOGOUT);
3474                 hwi_write_buffer(pwrb, task);
3475                 break;
3476
3477         default:
3478                 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
3479                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3480                 return -EINVAL;
3481         }
3482
3483         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3484                       be32_to_cpu(task->data_count));
3485         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3486                       io_task->pwrb_handle->nxt_wrb_index);
3487         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3488
3489         doorbell |= cid & DB_WRB_POST_CID_MASK;
3490         doorbell |= (io_task->pwrb_handle->wrb_index &
3491                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3492         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3493         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3494         return 0;
3495 }
3496
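/**
 * beiscsi_task_xmit - libiscsi xmit_task entry point
 * @task: libiscsi task to transmit
 *
 * Management PDUs (tasks without a scsi_cmnd) are routed to
 * beiscsi_mtask(); SCSI commands are DMA-mapped and handed to
 * beiscsi_iotask() with their transfer length and data direction.
 */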
3497 static int beiscsi_task_xmit(struct iscsi_task *task)
3498 {
3499         struct iscsi_conn *conn = task->conn;
3500         struct beiscsi_io_task *io_task = task->dd_data;
3501         struct scsi_cmnd *sc = task->sc;
3502         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3503         struct scatterlist *sg;
3504         int num_sg;
3505         unsigned int  writedir = 0, xferlen = 0;
3506
3507         SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3508                  "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3509                  task, conn, beiscsi_conn);
3510         if (!sc)
3511                 return beiscsi_mtask(task);
3512
3513         io_task->scsi_cmnd = sc;
3514         num_sg = scsi_dma_map(sc);
3515         if (num_sg < 0) {
3516                 SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
3517                 return num_sg;
3518         }
3519         SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3520                   (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3521         xferlen = scsi_bufflen(sc);
3522         sg = scsi_sglist(sc);
3523         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3524                 writedir = 1;
3525                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3526                          task->imm_count);
3527         } else
3528                 writedir = 0;
3529         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3530 }
3531
3532
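/**
 * beiscsi_remove - PCI remove callback
 * @pcidev: PCI device being removed
 *
 * Disables host interrupts, frees the MSIX or INTx vectors, stops
 * blk-iopoll and the work queue, cleans up the adapter rings and
 * releases the driver memory, mailbox and iSCSI host.
 */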
3533 static void beiscsi_remove(struct pci_dev *pcidev)
3534 {
3535         struct beiscsi_hba *phba = NULL;
3536         struct hwi_controller *phwi_ctrlr;
3537         struct hwi_context_memory *phwi_context;
3538         struct be_eq_obj *pbe_eq;
3539         unsigned int i, msix_vec;
3540
3541         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3542         if (!phba) {
3543                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
3544                 return;
3545         }
3546
3547         phwi_ctrlr = phba->phwi_ctrlr;
3548         phwi_context = phwi_ctrlr->phwi_ctxt;
3549         hwi_disable_intr(phba);
3550         if (phba->msix_enabled) {
3551                 for (i = 0; i <= phba->num_cpus; i++) {
3552                         msix_vec = phba->msix_entries[i].vector;
3553                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3554                 }
3555         } else
3556                 if (phba->pcidev->irq)
3557                         free_irq(phba->pcidev->irq, phba);
3558         pci_disable_msix(phba->pcidev);
3559         destroy_workqueue(phba->wq);
3560         if (blk_iopoll_enabled)
3561                 for (i = 0; i < phba->num_cpus; i++) {
3562                         pbe_eq = &phwi_context->be_eq[i];
3563                         blk_iopoll_disable(&pbe_eq->iopoll);
3564                 }
3565
3566         beiscsi_clean_port(phba);
3567         beiscsi_free_mem(phba);
3568         beiscsi_unmap_pci_function(phba);
3569         pci_free_consistent(phba->pcidev,
3570                             phba->ctrl.mbox_mem_alloced.size,
3571                             phba->ctrl.mbox_mem_alloced.va,
3572                             phba->ctrl.mbox_mem_alloced.dma);
3573         iscsi_host_remove(phba->shost);
3574         pci_dev_put(phba->pcidev);
3575         iscsi_host_free(phba->shost);
3576 }
3577
3578 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3579 {
3580         int i, status;
3581
3582         for (i = 0; i <= phba->num_cpus; i++)
3583                 phba->msix_entries[i].entry = i;
3584
3585         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3586                                  (phba->num_cpus + 1));
3587         if (!status)
3588                 phba->msix_enabled = true;
3589
3590         return;
3591 }
3592
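/**
 * beiscsi_dev_probe - PCI probe callback
 * @pcidev: PCI device being probed
 * @id: matching entry from beiscsi_pci_id_table
 *
 * Enables the device, allocates the host structure, sets up MSIX (one
 * vector per CPU plus one for the MCC when enabled), initializes the
 * controller rings, blk-iopoll and IRQs, and finally enables host
 * interrupts.
 */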
3593 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3594                                 const struct pci_device_id *id)
3595 {
3596         struct beiscsi_hba *phba = NULL;
3597         struct hwi_controller *phwi_ctrlr;
3598         struct hwi_context_memory *phwi_context;
3599         struct be_eq_obj *pbe_eq;
3600         int ret, msix_vec, num_cpus, i;
3601
3602         ret = beiscsi_enable_pci(pcidev);
3603         if (ret < 0) {
3604                 dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3605                         "Failed to enable pci device\n");
3606                 return ret;
3607         }
3608
3609         phba = beiscsi_hba_alloc(pcidev);
3610         if (!phba) {
3611                 dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3612                         "Failed in beiscsi_hba_alloc\n");
3613                 goto disable_pci;
3614         }
3615         SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
3616
3617         pci_set_drvdata(pcidev, phba);
3618         if (enable_msix)
3619                 num_cpus = find_num_cpus();
3620         else
3621                 num_cpus = 1;
3622         phba->num_cpus = num_cpus;
3623         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3624
3625         if (enable_msix)
3626                 beiscsi_msix_enable(phba);
3627         ret = be_ctrl_init(phba, pcidev);
3628         if (ret) {
3629                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3630                              "Failed in be_ctrl_init\n");
3631                 goto hba_free;
3632         }
3633
3634         spin_lock_init(&phba->io_sgl_lock);
3635         spin_lock_init(&phba->mgmt_sgl_lock);
3636         spin_lock_init(&phba->isr_lock);
3637         beiscsi_get_params(phba);
3638         ret = beiscsi_init_port(phba);
3639         if (ret < 0) {
3640                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3641                              "Failed in beiscsi_init_port\n");
3642                 goto free_port;
3643         }
3644
3645         snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3646                  phba->shost->host_no);
3647         phba->wq = create_workqueue(phba->wq_name);
3648         if (!phba->wq) {
3649                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3650                              "Failed to allocate work queue\n");
3651                 goto free_twq;
3652         }
3653
3654         INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3655
3656         phwi_ctrlr = phba->phwi_ctrlr;
3657         phwi_context = phwi_ctrlr->phwi_ctxt;
3658         if (blk_iopoll_enabled) {
3659                 for (i = 0; i < phba->num_cpus; i++) {
3660                         pbe_eq = &phwi_context->be_eq[i];
3661                         blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3662                                         be_iopoll);
3663                         blk_iopoll_enable(&pbe_eq->iopoll);
3664                 }
3665         }
3666         ret = beiscsi_init_irqs(phba);
3667         if (ret < 0) {
3668                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3669                              "Failed in beiscsi_init_irqs\n");
3670                 goto free_blkenbld;
3671         }
3672         ret = hwi_enable_intr(phba);
3673         if (ret < 0) {
3674                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3675                              "Failed in hwi_enable_intr\n");
3676                 goto free_ctrlr;
3677         }
3678         SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3679         return 0;
3680
3681 free_ctrlr:
3682         if (phba->msix_enabled) {
3683                 for (i = 0; i <= phba->num_cpus; i++) {
3684                         msix_vec = phba->msix_entries[i].vector;
3685                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3686                 }
3687         } else
3688                 if (phba->pcidev->irq)
3689                         free_irq(phba->pcidev->irq, phba);
3690         pci_disable_msix(phba->pcidev);
3691 free_blkenbld:
3692         destroy_workqueue(phba->wq);
3693         if (blk_iopoll_enabled)
3694                 for (i = 0; i < phba->num_cpus; i++) {
3695                         pbe_eq = &phwi_context->be_eq[i];
3696                         blk_iopoll_disable(&pbe_eq->iopoll);
3697                 }
3698 free_twq:
3699         beiscsi_clean_port(phba);
3700         beiscsi_free_mem(phba);
3701 free_port:
3702         pci_free_consistent(phba->pcidev,
3703                             phba->ctrl.mbox_mem_alloced.size,
3704                             phba->ctrl.mbox_mem_alloced.va,
3705                             phba->ctrl.mbox_mem_alloced.dma);
3706         beiscsi_unmap_pci_function(phba);
3707 hba_free:
3708         iscsi_host_remove(phba->shost);
3709         pci_dev_put(phba->pcidev);
3710         iscsi_host_free(phba->shost);
3711 disable_pci:
3712         pci_disable_device(pcidev);
3713         return ret;
3714 }
3715
3716 struct iscsi_transport beiscsi_iscsi_transport = {
3717         .owner = THIS_MODULE,
3718         .name = DRV_NAME,
3719         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
3720                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3721         .param_mask = ISCSI_MAX_RECV_DLENGTH |
3722                 ISCSI_MAX_XMIT_DLENGTH |
3723                 ISCSI_HDRDGST_EN |
3724                 ISCSI_DATADGST_EN |
3725                 ISCSI_INITIAL_R2T_EN |
3726                 ISCSI_MAX_R2T |
3727                 ISCSI_IMM_DATA_EN |
3728                 ISCSI_FIRST_BURST |
3729                 ISCSI_MAX_BURST |
3730                 ISCSI_PDU_INORDER_EN |
3731                 ISCSI_DATASEQ_INORDER_EN |
3732                 ISCSI_ERL |
3733                 ISCSI_CONN_PORT |
3734                 ISCSI_CONN_ADDRESS |
3735                 ISCSI_EXP_STATSN |
3736                 ISCSI_PERSISTENT_PORT |
3737                 ISCSI_PERSISTENT_ADDRESS |
3738                 ISCSI_TARGET_NAME | ISCSI_TPGT |
3739                 ISCSI_USERNAME | ISCSI_PASSWORD |
3740                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3741                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3742                 ISCSI_LU_RESET_TMO |
3743                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3744                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3745         .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3746                                 ISCSI_HOST_INITIATOR_NAME,
3747         .create_session = beiscsi_session_create,
3748         .destroy_session = beiscsi_session_destroy,
3749         .create_conn = beiscsi_conn_create,
3750         .bind_conn = beiscsi_conn_bind,
3751         .destroy_conn = iscsi_conn_teardown,
3752         .set_param = beiscsi_set_param,
3753         .get_conn_param = beiscsi_conn_get_param,
3754         .get_session_param = iscsi_session_get_param,
3755         .get_host_param = beiscsi_get_host_param,
3756         .start_conn = beiscsi_conn_start,
3757         .stop_conn = beiscsi_conn_stop,
3758         .send_pdu = iscsi_conn_send_pdu,
3759         .xmit_task = beiscsi_task_xmit,
3760         .cleanup_task = beiscsi_cleanup_task,
3761         .alloc_pdu = beiscsi_alloc_pdu,
3762         .parse_pdu_itt = beiscsi_parse_pdu,
3763         .get_stats = beiscsi_conn_get_stats,
3764         .ep_connect = beiscsi_ep_connect,
3765         .ep_poll = beiscsi_ep_poll,
3766         .ep_disconnect = beiscsi_ep_disconnect,
3767         .session_recovery_timedout = iscsi_session_recovery_timedout,
3768 };
3769
3770 static struct pci_driver beiscsi_pci_driver = {
3771         .name = DRV_NAME,
3772         .probe = beiscsi_dev_probe,
3773         .remove = beiscsi_remove,
3774         .id_table = beiscsi_pci_id_table
3775 };
3776
3777
3778 static int __init beiscsi_module_init(void)
3779 {
3780         int ret;
3781
3782         beiscsi_scsi_transport =
3783                         iscsi_register_transport(&beiscsi_iscsi_transport);
3784         if (!beiscsi_scsi_transport) {
3785                 SE_DEBUG(DBG_LVL_1,
3786                          "beiscsi_module_init - Unable to register "
3787                          "beiscsi transport.\n");
3788                 return -ENOMEM;
3789         }
3790         SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3791                  &beiscsi_iscsi_transport);
3792
3793         ret = pci_register_driver(&beiscsi_pci_driver);
3794         if (ret) {
3795                 SE_DEBUG(DBG_LVL_1,
3796                          "beiscsi_module_init - Unable to register "
3797                          "beiscsi pci driver.\n");
3798                 goto unregister_iscsi_transport;
3799         }
3800         return 0;
3801
3802 unregister_iscsi_transport:
3803         iscsi_unregister_transport(&beiscsi_iscsi_transport);
3804         return ret;
3805 }
3806
3807 static void __exit beiscsi_module_exit(void)
3808 {
3809         pci_unregister_driver(&beiscsi_pci_driver);
3810         iscsi_unregister_transport(&beiscsi_iscsi_transport);
3811 }
3812
3813 module_init(beiscsi_module_init);
3814 module_exit(beiscsi_module_exit);