drivers/scsi/be2iscsi/be_main.c
1 /**
2  * Copyright (C) 2005 - 2009 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/blkdev.h>
24 #include <linux/pci.h>
25 #include <linux/string.h>
26 #include <linux/kernel.h>
27 #include <linux/semaphore.h>
28
29 #include <scsi/libiscsi.h>
30 #include <scsi/scsi_transport_iscsi.h>
31 #include <scsi/scsi_transport.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi.h>
36 #include "be_main.h"
37 #include "be_iscsi.h"
38 #include "be_mgmt.h"
39
40 static unsigned int be_iopoll_budget = 10;
41 static unsigned int be_max_phys_size = 64;
42 static unsigned int enable_msix = 1;
43 static unsigned int ring_mode;
44
46 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
47 MODULE_AUTHOR("ServerEngines Corporation");
48 MODULE_LICENSE("GPL");
49 module_param(be_iopoll_budget, int, 0);
50 module_param(enable_msix, int, 0);
51 module_param(be_max_phys_size, uint, S_IRUGO);
52 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically "
53                                    "contiguous memory that can be allocated. "
54                                    "Range is 16 - 128");
55
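/**
 * beiscsi_slave_configure - Limit the request segment size
 * @sdev: The scsi_device being configured
 *
 * Caps each scatter-gather segment at 64KB.
 */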
56 static int beiscsi_slave_configure(struct scsi_device *sdev)
57 {
58         blk_queue_max_segment_size(sdev->request_queue, 65536);
59         return 0;
60 }
61
62 /*------------------- PCI Driver operations and data ----------------- */
63 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
64         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
65         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
66         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
67         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
68         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
69         { 0 }
70 };
71 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
72
73 static struct scsi_host_template beiscsi_sht = {
74         .module = THIS_MODULE,
75         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
76         .proc_name = DRV_NAME,
77         .queuecommand = iscsi_queuecommand,
78         .eh_abort_handler = iscsi_eh_abort,
79         .change_queue_depth = iscsi_change_queue_depth,
80         .slave_configure = beiscsi_slave_configure,
81         .target_alloc = iscsi_target_alloc,
82         .eh_device_reset_handler = iscsi_eh_device_reset,
83         .eh_target_reset_handler = iscsi_eh_target_reset,
84         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
85         .can_queue = BE2_IO_DEPTH,
86         .this_id = -1,
87         .max_sectors = BEISCSI_MAX_SECTORS,
88         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
89         .use_clustering = ENABLE_CLUSTERING,
90 };
91
92 static struct scsi_transport_template *beiscsi_scsi_transport;
93
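/**
 * beiscsi_hba_alloc - Allocate the Scsi_Host and driver private data
 * @pcidev: PCI device the host sits on
 *
 * Sets up the host limits, takes a reference on the PCI device and adds
 * the host to the iSCSI transport. Returns NULL on failure.
 */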
94 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
95 {
96         struct beiscsi_hba *phba;
97         struct Scsi_Host *shost;
98
99         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
100         if (!shost) {
101                 dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
102                         "iscsi_host_alloc failed\n");
103                 return NULL;
104         }
105         shost->dma_boundary = pcidev->dma_mask;
106         shost->max_id = BE2_MAX_SESSIONS;
107         shost->max_channel = 0;
108         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
109         shost->max_lun = BEISCSI_NUM_MAX_LUN;
110         shost->transportt = beiscsi_scsi_transport;
111         phba = iscsi_host_priv(shost);
112         memset(phba, 0, sizeof(*phba));
113         phba->shost = shost;
114         phba->pcidev = pci_dev_get(pcidev);
115
116         if (iscsi_host_add(shost, &phba->pcidev->dev))
117                 goto free_devices;
118         return phba;
119
120 free_devices:
121         pci_dev_put(phba->pcidev);
122         iscsi_host_free(phba->shost);
123         return NULL;
124 }
125
126 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
127 {
128         if (phba->csr_va) {
129                 iounmap(phba->csr_va);
130                 phba->csr_va = NULL;
131         }
132         if (phba->db_va) {
133                 iounmap(phba->db_va);
134                 phba->db_va = NULL;
135         }
136         if (phba->pci_va) {
137                 iounmap(phba->pci_va);
138                 phba->pci_va = NULL;
139         }
140 }
141
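/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and PCI config BARs
 * @phba: The hba pointer
 * @pcidev: PCI device whose BARs are mapped
 *
 * BAR 2 holds the CSR registers, the first 128KB of BAR 4 the doorbells
 * and BAR 1 the PCI config registers. All mappings are undone on error.
 */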
142 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
143                                 struct pci_dev *pcidev)
144 {
145         u8 __iomem *addr;
146
147         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
148                                pci_resource_len(pcidev, 2));
149         if (addr == NULL)
150                 return -ENOMEM;
151         phba->ctrl.csr = addr;
152         phba->csr_va = addr;
153         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
154
155         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
156         if (addr == NULL)
157                 goto pci_map_err;
158         phba->ctrl.db = addr;
159         phba->db_va = addr;
160         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
161
162         addr = ioremap_nocache(pci_resource_start(pcidev, 1),
163                                pci_resource_len(pcidev, 1));
164         if (addr == NULL)
165                 goto pci_map_err;
166         phba->ctrl.pcicfg = addr;
167         phba->pci_va = addr;
168         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
169         return 0;
170
171 pci_map_err:
172         beiscsi_unmap_pci_function(phba);
173         return -ENOMEM;
174 }
175
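/**
 * beiscsi_enable_pci - Enable the PCI function and set the DMA mask
 * @pcidev: PCI device to enable
 *
 * Enables bus mastering and programs a 64 bit consistent DMA mask,
 * falling back to 32 bit when the larger mask is not accepted.
 */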
176 static int beiscsi_enable_pci(struct pci_dev *pcidev)
177 {
178         int ret;
179
180         ret = pci_enable_device(pcidev);
181         if (ret) {
182                 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
183                         "failed\n");
184                 return ret;
185         }
186
187         pci_set_master(pcidev);
188         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
189                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
190                 if (ret) {
191                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
192                         pci_disable_device(pcidev);
193                         return ret;
194                 }
195         }
196         return 0;
197 }
198
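/**
 * be_ctrl_init - Map the BARs and set up the firmware mailbox
 * @phba: The hba pointer
 * @pdev: PCI device
 *
 * The mailbox is allocated with 16 bytes of slack so that the portion
 * handed to the hardware starts on a 16 byte boundary.
 */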
199 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
200 {
201         struct be_ctrl_info *ctrl = &phba->ctrl;
202         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
203         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
204         int status = 0;
205
206         ctrl->pdev = pdev;
207         status = beiscsi_map_pci_bars(phba, pdev);
208         if (status)
209                 return status;
210         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
211         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
212                                                   mbox_mem_alloc->size,
213                                                   &mbox_mem_alloc->dma);
214         if (!mbox_mem_alloc->va) {
215                 beiscsi_unmap_pci_function(phba);
216                 status = -ENOMEM;
217                 return status;
218         }
219
220         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
221         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
222         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
223         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
224         spin_lock_init(&ctrl->mbox_lock);
225         spin_lock_init(&phba->ctrl.mcc_lock);
226         spin_lock_init(&phba->ctrl.mcc_cq_lock);
227
228         return status;
229 }
230
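/**
 * beiscsi_get_params - Derive the per-controller resource limits
 * @phba: The hba pointer
 *
 * EQ and CQ depths are sized from the worst case mix of commands,
 * logouts, TMFs and async PDUs, rounded up to a multiple of 512, with
 * the EQ never going below 1024 entries.
 */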
231 static void beiscsi_get_params(struct beiscsi_hba *phba)
232 {
233         phba->params.ios_per_ctrl = BE2_IO_DEPTH;
234         phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS;
235         phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS;
236         phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2;
237         phba->params.num_sge_per_io = BE2_SGE;
238         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
239         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
240         phba->params.eq_timer = 64;
241         phba->params.num_eq_entries =
242             (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
243                                                                 512) + 1) * 512;
244         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
245                                 ? 1024 : phba->params.num_eq_entries;
246         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
247                  phba->params.num_eq_entries);
248         phba->params.num_cq_entries =
249             (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
250                                                                 512) + 1) * 512;
251         SE_DEBUG(DBG_LVL_8,
252                 "phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d "
253                 "BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d\n",
254                 phba->params.num_cq_entries, BE2_CMDS_PER_CXN,
255                 BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS);
256         phba->params.wrbs_per_cxn = 256;
257 }
258
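/**
 * hwi_ring_eq_db - Ring the event queue doorbell
 * @phba: The hba pointer
 * @id: EQ id
 * @clr_interrupt: Clear the interrupt if set
 * @num_processed: Number of EQEs popped by the host
 * @rearm: Rearm the EQ for further events
 * @event: Indicate the pop was on behalf of an event
 */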
259 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
260                            unsigned int id, unsigned int clr_interrupt,
261                            unsigned int num_processed,
262                            unsigned char rearm, unsigned char event)
263 {
264         u32 val = 0;
265         val |= id & DB_EQ_RING_ID_MASK;
266         if (rearm)
267                 val |= 1 << DB_EQ_REARM_SHIFT;
268         if (clr_interrupt)
269                 val |= 1 << DB_EQ_CLR_SHIFT;
270         if (event)
271                 val |= 1 << DB_EQ_EVNT_SHIFT;
272         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
273         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
274 }
275
276 /**
277  * be_isr_mcc - The isr routine of the driver.
278  * @irq: Not used
279  * @dev_id: Pointer to host adapter structure
280  */
281 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
282 {
283         struct beiscsi_hba *phba;
284         struct be_eq_entry *eqe = NULL;
285         struct be_queue_info *eq;
286         struct be_queue_info *mcc;
287         unsigned int num_eq_processed;
288         struct be_eq_obj *pbe_eq;
289         unsigned long flags;
290
291         pbe_eq = dev_id;
292         eq = &pbe_eq->q;
293         phba =  pbe_eq->phba;
294         mcc = &phba->ctrl.mcc_obj.cq;
295         eqe = queue_tail_node(eq);
296         if (!eqe)
297                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
298
299         num_eq_processed = 0;
300
301         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
302                                 & EQE_VALID_MASK) {
303                 if (((eqe->dw[offsetof(struct amap_eq_entry,
304                      resource_id) / 32] &
305                      EQE_RESID_MASK) >> 16) == mcc->id) {
306                         spin_lock_irqsave(&phba->isr_lock, flags);
307                         phba->todo_mcc_cq = 1;
308                         spin_unlock_irqrestore(&phba->isr_lock, flags);
309                 }
310                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
311                 queue_tail_inc(eq);
312                 eqe = queue_tail_node(eq);
313                 num_eq_processed++;
314         }
315         if (phba->todo_mcc_cq)
316                 queue_work(phba->wq, &phba->work_cqs);
317         if (num_eq_processed)
318                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
319
320         return IRQ_HANDLED;
321 }
322
323 /**
324  * be_isr_msix - The isr routine of the driver.
325  * @irq: Not used
326  * @dev_id: Pointer to host adapter structure
327  */
328 static irqreturn_t be_isr_msix(int irq, void *dev_id)
329 {
330         struct beiscsi_hba *phba;
331         struct be_eq_entry *eqe = NULL;
332         struct be_queue_info *eq;
333         struct be_queue_info *cq;
334         unsigned int num_eq_processed;
335         struct be_eq_obj *pbe_eq;
336         unsigned long flags;
337
338         pbe_eq = dev_id;
339         eq = &pbe_eq->q;
340         cq = pbe_eq->cq;
341         eqe = queue_tail_node(eq);
342         if (!eqe)
343                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
344
345         phba = pbe_eq->phba;
346         num_eq_processed = 0;
347         if (blk_iopoll_enabled) {
348                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
349                                         & EQE_VALID_MASK) {
350                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
351                                 blk_iopoll_sched(&pbe_eq->iopoll);
352
353                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
354                         queue_tail_inc(eq);
355                         eqe = queue_tail_node(eq);
356                         num_eq_processed++;
357                 }
358                 if (num_eq_processed)
359                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
360
361                 return IRQ_HANDLED;
362         } else {
363                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
364                                                 & EQE_VALID_MASK) {
365                         spin_lock_irqsave(&phba->isr_lock, flags);
366                         phba->todo_cq = 1;
367                         spin_unlock_irqrestore(&phba->isr_lock, flags);
368                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
369                         queue_tail_inc(eq);
370                         eqe = queue_tail_node(eq);
371                         num_eq_processed++;
372                 }
373                 if (phba->todo_cq)
374                         queue_work(phba->wq, &phba->work_cqs);
375
376                 if (num_eq_processed)
377                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
378
379                 return IRQ_HANDLED;
380         }
381 }
382
383 /**
384  * be_isr - The isr routine of the driver.
385  * @irq: Not used
386  * @dev_id: Pointer to host adapter structure
387  */
388 static irqreturn_t be_isr(int irq, void *dev_id)
389 {
390         struct beiscsi_hba *phba;
391         struct hwi_controller *phwi_ctrlr;
392         struct hwi_context_memory *phwi_context;
393         struct be_eq_entry *eqe = NULL;
394         struct be_queue_info *eq;
395         struct be_queue_info *cq;
396         struct be_queue_info *mcc;
397         unsigned long flags, index;
398         unsigned int num_mcceq_processed, num_ioeq_processed;
399         struct be_ctrl_info *ctrl;
400         struct be_eq_obj *pbe_eq;
401         int isr;
402
403         phba = dev_id;
404         ctrl = &phba->ctrl;
405         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
406                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
407         if (!isr)
408                 return IRQ_NONE;
409
410         phwi_ctrlr = phba->phwi_ctrlr;
411         phwi_context = phwi_ctrlr->phwi_ctxt;
412         pbe_eq = &phwi_context->be_eq[0];
413
414         eq = &phwi_context->be_eq[0].q;
415         mcc = &phba->ctrl.mcc_obj.cq;
416         index = 0;
417         eqe = queue_tail_node(eq);
418         if (!eqe)
419                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
420
421         num_ioeq_processed = 0;
422         num_mcceq_processed = 0;
423         if (blk_iopoll_enabled) {
424                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
425                                         & EQE_VALID_MASK) {
426                         if (((eqe->dw[offsetof(struct amap_eq_entry,
427                              resource_id) / 32] &
428                              EQE_RESID_MASK) >> 16) == mcc->id) {
429                                 spin_lock_irqsave(&phba->isr_lock, flags);
430                                 phba->todo_mcc_cq = 1;
431                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
432                                 num_mcceq_processed++;
433                         } else {
434                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
435                                         blk_iopoll_sched(&pbe_eq->iopoll);
436                                 num_ioeq_processed++;
437                         }
438                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
439                         queue_tail_inc(eq);
440                         eqe = queue_tail_node(eq);
441                 }
442                 if (num_ioeq_processed || num_mcceq_processed) {
443                         if (phba->todo_mcc_cq)
444                                 queue_work(phba->wq, &phba->work_cqs);
445
446                         if ((num_mcceq_processed) && (!num_ioeq_processed))
447                                 hwi_ring_eq_db(phba, eq->id, 0,
448                                                (num_ioeq_processed +
449                                                 num_mcceq_processed), 1, 1);
450                         else
451                                 hwi_ring_eq_db(phba, eq->id, 0,
452                                                (num_ioeq_processed +
453                                                 num_mcceq_processed), 0, 1);
454
455                         return IRQ_HANDLED;
456                 } else
457                         return IRQ_NONE;
458         } else {
459                 cq = &phwi_context->be_cq[0];
460                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
461                                                 & EQE_VALID_MASK) {
462
463                         if (((eqe->dw[offsetof(struct amap_eq_entry,
464                              resource_id) / 32] &
465                              EQE_RESID_MASK) >> 16) != cq->id) {
466                                 spin_lock_irqsave(&phba->isr_lock, flags);
467                                 phba->todo_mcc_cq = 1;
468                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
469                         } else {
470                                 spin_lock_irqsave(&phba->isr_lock, flags);
471                                 phba->todo_cq = 1;
472                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
473                         }
474                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
475                         queue_tail_inc(eq);
476                         eqe = queue_tail_node(eq);
477                         num_ioeq_processed++;
478                 }
479                 if (phba->todo_cq || phba->todo_mcc_cq)
480                         queue_work(phba->wq, &phba->work_cqs);
481
482                 if (num_ioeq_processed) {
483                         hwi_ring_eq_db(phba, eq->id, 0,
484                                        num_ioeq_processed, 1, 1);
485                         return IRQ_HANDLED;
486                 } else
487                         return IRQ_NONE;
488         }
489 }
490
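/**
 * beiscsi_init_irqs - Register the interrupt handlers
 * @phba: The hba pointer
 *
 * With MSI-X enabled one vector is requested per CPU for the I/O event
 * queues plus one for the MCC event queue; otherwise the shared legacy
 * INTx handler is registered.
 */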
491 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
492 {
493         struct pci_dev *pcidev = phba->pcidev;
494         struct hwi_controller *phwi_ctrlr;
495         struct hwi_context_memory *phwi_context;
496         int ret, msix_vec, i = 0;
497         char desc[32];
498
499         phwi_ctrlr = phba->phwi_ctrlr;
500         phwi_context = phwi_ctrlr->phwi_ctxt;
501
        if (phba->msix_enabled) {
                for (i = 0; i < phba->num_cpus; i++) {
                        sprintf(desc, "beiscsi_msix_%04x", i);
                        msix_vec = phba->msix_entries[i].vector;
                        ret = request_irq(msix_vec, be_isr_msix, 0, desc,
                                          &phwi_context->be_eq[i]);
                        if (ret) {
                                shost_printk(KERN_ERR, phba->shost,
                                             "beiscsi_init_irqs - Failed to "
                                             "register msix vector %d\n", i);
                                goto free_msix_irqs;
                        }
                }
                msix_vec = phba->msix_entries[i].vector;
                ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
                                  &phwi_context->be_eq[i]);
                if (ret) {
                        shost_printk(KERN_ERR, phba->shost,
                                     "beiscsi_init_irqs - Failed to register "
                                     "beiscsi_msix_mcc\n");
                        goto free_msix_irqs;
                }
512         } else {
513                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
514                                   "beiscsi", phba);
515                 if (ret) {
516                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
517                                      "Failed to register irq\n");
518                         return ret;
519                 }
520         }
        return 0;

free_msix_irqs:
        while (i-- > 0)
                free_irq(phba->msix_entries[i].vector,
                         &phwi_context->be_eq[i]);
        return ret;
}
523
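/**
 * hwi_ring_cq_db - Ring the completion queue doorbell
 * @phba: The hba pointer
 * @id: CQ id
 * @num_processed: Number of CQEs popped by the host
 * @rearm: Rearm the CQ for further completions
 * @event: Unused for CQ doorbells
 */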
524 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
525                            unsigned int id, unsigned int num_processed,
526                            unsigned char rearm, unsigned char event)
527 {
528         u32 val = 0;
529         val |= id & DB_CQ_RING_ID_MASK;
530         if (rearm)
531                 val |= 1 << DB_CQ_REARM_SHIFT;
532         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
533         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
534 }
535
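/**
 * beiscsi_process_async_pdu - Hand an unsolicited PDU to libiscsi
 * @beiscsi_conn: Connection the PDU arrived on
 * @phba: The hba pointer
 * @cid: Connection id
 * @ppdu: PDU header received from the adapter
 * @pdu_len: Length of the PDU header
 * @pbuffer: Data segment of the PDU, if any
 * @buf_len: Length of the data segment
 *
 * Returns 0 when the PDU is completed to libiscsi, 1 on an unrecognized
 * opcode.
 */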
536 static unsigned int
537 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
538                           struct beiscsi_hba *phba,
539                           unsigned short cid,
540                           struct pdu_base *ppdu,
541                           unsigned long pdu_len,
542                           void *pbuffer, unsigned long buf_len)
543 {
544         struct iscsi_conn *conn = beiscsi_conn->conn;
545         struct iscsi_session *session = conn->session;
546         struct iscsi_task *task;
547         struct beiscsi_io_task *io_task;
548         struct iscsi_hdr *login_hdr;
549
550         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
551                                                 PDUBASE_OPCODE_MASK) {
552         case ISCSI_OP_NOOP_IN:
553                 pbuffer = NULL;
554                 buf_len = 0;
555                 break;
556         case ISCSI_OP_ASYNC_EVENT:
557                 break;
558         case ISCSI_OP_REJECT:
559                 WARN_ON(!pbuffer);
560                 WARN_ON(buf_len != 48);
561                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
562                 break;
563         case ISCSI_OP_LOGIN_RSP:
564                 task = conn->login_task;
565                 io_task = task->dd_data;
566                 login_hdr = (struct iscsi_hdr *)ppdu;
567                 login_hdr->itt = io_task->libiscsi_itt;
568                 break;
569         default:
570                 shost_printk(KERN_WARNING, phba->shost,
571                              "Unrecognized opcode 0x%x in async msg \n",
572                              (ppdu->
573                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
574                                                 & PDUBASE_OPCODE_MASK));
575                 return 1;
576         }
577
578         spin_lock_bh(&session->lock);
579         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
580         spin_unlock_bh(&session->lock);
581         return 0;
582 }
583
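/**
 * alloc_io_sgl_handle - Pop a free SGL handle from the I/O pool
 * @phba: The hba pointer
 *
 * Returns NULL when the pool is exhausted.
 */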
584 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
585 {
586         struct sgl_handle *psgl_handle;
587
588         if (phba->io_sgl_hndl_avbl) {
589                 SE_DEBUG(DBG_LVL_8,
590                          "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
591                          phba->io_sgl_alloc_index);
592                 psgl_handle = phba->io_sgl_hndl_base[phba->
593                                                 io_sgl_alloc_index];
594                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
595                 phba->io_sgl_hndl_avbl--;
596                 if (phba->io_sgl_alloc_index == (phba->params.
597                                                  ios_per_ctrl - 1))
598                         phba->io_sgl_alloc_index = 0;
599                 else
600                         phba->io_sgl_alloc_index++;
601         } else
602                 psgl_handle = NULL;
603         return psgl_handle;
604 }
605
606 static void
607 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
608 {
609         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
610                  phba->io_sgl_free_index);
611         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
612                 /*
613                  * this can happen if clean_task is called on a task that
614                  * failed in xmit_task or alloc_pdu.
615                  */
616                 SE_DEBUG(DBG_LVL_8,
617                          "Double Free in IO SGL io_sgl_free_index=%d, "
618                          "value there=%p\n", phba->io_sgl_free_index,
619                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
620                 return;
621         }
622         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
623         phba->io_sgl_hndl_avbl++;
624         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
625                 phba->io_sgl_free_index = 0;
626         else
627                 phba->io_sgl_free_index++;
628 }
629
630 /**
631  * alloc_wrb_handle - To allocate a wrb handle
632  * @phba: The hba pointer
633  * @cid: The cid to use for allocation
634  * @index: index allocation and wrb index
635  *
636  * This happens under session_lock until submission to chip
637  */
638 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
639                                     int index)
640 {
641         struct hwi_wrb_context *pwrb_context;
642         struct hwi_controller *phwi_ctrlr;
643         struct wrb_handle *pwrb_handle;
644
645         phwi_ctrlr = phba->phwi_ctrlr;
646         pwrb_context = &phwi_ctrlr->wrb_context[cid];
647         if (pwrb_context->wrb_handles_available) {
648                 pwrb_handle = pwrb_context->pwrb_handle_base[
649                                             pwrb_context->alloc_index];
650                 pwrb_context->wrb_handles_available--;
651                 pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index;
652                 if (pwrb_context->alloc_index ==
653                                                 (phba->params.wrbs_per_cxn - 1))
654                         pwrb_context->alloc_index = 0;
655                 else
656                         pwrb_context->alloc_index++;
657         } else
658                 pwrb_handle = NULL;
659         return pwrb_handle;
660 }
661
662 /**
663  * free_wrb_handle - To free the wrb handle back to pool
664  * @phba: The hba pointer
665  * @pwrb_context: The context to free from
666  * @pwrb_handle: The wrb_handle to free
667  *
668  * This happens under session_lock until submission to chip
669  */
670 static void
671 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
672                 struct wrb_handle *pwrb_handle)
673 {
674         if (!ring_mode)
675                 pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
676                                                pwrb_handle;
677         pwrb_context->wrb_handles_available++;
678         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
679                 pwrb_context->free_index = 0;
680         else
681                 pwrb_context->free_index++;
682
683         SE_DEBUG(DBG_LVL_8,
684                  "FREE WRB: pwrb_handle=%p free_index=0x%x "
685                  "wrb_handles_available=%d\n",
686                  pwrb_handle, pwrb_context->free_index,
687                  pwrb_context->wrb_handles_available);
688 }
689
690 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
691 {
692         struct sgl_handle *psgl_handle;
693
694         if (phba->eh_sgl_hndl_avbl) {
695                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
696                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
697                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
698                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
699                 phba->eh_sgl_hndl_avbl--;
700                 if (phba->eh_sgl_alloc_index ==
701                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
702                      1))
703                         phba->eh_sgl_alloc_index = 0;
704                 else
705                         phba->eh_sgl_alloc_index++;
706         } else
707                 psgl_handle = NULL;
708         return psgl_handle;
709 }
710
711 void
712 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
713 {
714
715         SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
716                              phba->eh_sgl_free_index);
717         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
718                 /*
719                  * this can happen if clean_task is called on a task that
720                  * failed in xmit_task or alloc_pdu.
721                  */
722                 SE_DEBUG(DBG_LVL_8,
723                          "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
724                          phba->eh_sgl_free_index);
725                 return;
726         }
727         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
728         phba->eh_sgl_hndl_avbl++;
729         if (phba->eh_sgl_free_index ==
730             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
731                 phba->eh_sgl_free_index = 0;
732         else
733                 phba->eh_sgl_free_index++;
734 }
735
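/**
 * be_complete_io - Complete a SCSI command from its solicited CQE
 * @beiscsi_conn: Connection the command was issued on
 * @task: The iscsi_task being completed
 * @psol: The solicited completion entry from the adapter
 *
 * Translates the response, status, residual count and sense data in the
 * CQE into the scsi_cmnd result before completing the task.
 */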
736 static void
737 be_complete_io(struct beiscsi_conn *beiscsi_conn,
738                struct iscsi_task *task, struct sol_cqe *psol)
739 {
740         struct beiscsi_io_task *io_task = task->dd_data;
741         struct be_status_bhs *sts_bhs =
742                                 (struct be_status_bhs *)io_task->cmd_bhs;
743         struct iscsi_conn *conn = beiscsi_conn->conn;
744         unsigned int sense_len;
745         unsigned char *sense;
746         u32 resid = 0, exp_cmdsn, max_cmdsn;
747         u8 rsp, status, flags;
748
749         exp_cmdsn = (psol->
750                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
751                         & SOL_EXP_CMD_SN_MASK);
752         max_cmdsn = ((psol->
753                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
754                         & SOL_EXP_CMD_SN_MASK) +
755                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
756                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
757         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
758                                                 & SOL_RESP_MASK) >> 16);
759         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
760                                                 & SOL_STS_MASK) >> 8);
761         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
762                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
763
764         task->sc->result = (DID_OK << 16) | status;
765         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
766                 task->sc->result = DID_ERROR << 16;
767                 goto unmap;
768         }
769
770         /* bidi not initially supported */
771         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
772                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
773                                 32] & SOL_RES_CNT_MASK);
774
775                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
776                         task->sc->result = DID_ERROR << 16;
777
778                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
779                         scsi_set_resid(task->sc, resid);
780                         if (!status && (scsi_bufflen(task->sc) - resid <
781                             task->sc->underflow))
782                                 task->sc->result = DID_ERROR << 16;
783                 }
784         }
785
786         if (status == SAM_STAT_CHECK_CONDITION) {
787                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
788                 sense = sts_bhs->sense_info + sizeof(unsigned short);
789                 sense_len =  cpu_to_be16(*slen);
790                 memcpy(task->sc->sense_buffer, sense,
791                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
792         }
793         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
794                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
795                                                         & SOL_RES_CNT_MASK)
796                          conn->rxdata_octets += (psol->
797                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
798                              & SOL_RES_CNT_MASK);
799         }
800 unmap:
801         scsi_dma_unmap(io_task->scsi_cmnd);
802         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
803 }
804
805 static void
806 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
807                    struct iscsi_task *task, struct sol_cqe *psol)
808 {
809         struct iscsi_logout_rsp *hdr;
810         struct beiscsi_io_task *io_task = task->dd_data;
811         struct iscsi_conn *conn = beiscsi_conn->conn;
812
813         hdr = (struct iscsi_logout_rsp *)task->hdr;
814         hdr->t2wait = 5;
815         hdr->t2retain = 0;
816         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
817                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
818         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
819                                         32] & SOL_RESP_MASK);
820         hdr->exp_cmdsn = cpu_to_be32(psol->
821                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
822                                         & SOL_EXP_CMD_SN_MASK);
823         hdr->max_cmdsn = be32_to_cpu((psol->
824                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
825                                         & SOL_EXP_CMD_SN_MASK) +
826                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
827                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
828         hdr->hlength = 0;
829         hdr->itt = io_task->libiscsi_itt;
830         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
831 }
832
833 static void
834 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
835                 struct iscsi_task *task, struct sol_cqe *psol)
836 {
837         struct iscsi_tm_rsp *hdr;
838         struct iscsi_conn *conn = beiscsi_conn->conn;
839         struct beiscsi_io_task *io_task = task->dd_data;
840
841         hdr = (struct iscsi_tm_rsp *)task->hdr;
842         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
843                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
844         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
845                                         32] & SOL_RESP_MASK);
846         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
847                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
848         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
849                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
850                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
851                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
852         hdr->itt = io_task->libiscsi_itt;
853         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
854 }
855
856 static void
857 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
858                        struct beiscsi_hba *phba, struct sol_cqe *psol)
859 {
860         struct hwi_wrb_context *pwrb_context;
861         struct wrb_handle *pwrb_handle = NULL;
862         struct sgl_handle *psgl_handle = NULL;
863         struct hwi_controller *phwi_ctrlr;
864         struct iscsi_task *task;
865         struct beiscsi_io_task *io_task;
866         struct iscsi_conn *conn = beiscsi_conn->conn;
867         struct iscsi_session *session = conn->session;
868
869         phwi_ctrlr = phba->phwi_ctrlr;
870         if (ring_mode) {
871                 psgl_handle = phba->sgl_hndl_array[((psol->
872                               dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
873                                 32] & SOL_ICD_INDEX_MASK) >> 6)];
874                 pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
875                 task = psgl_handle->task;
876                 pwrb_handle = NULL;
877         } else {
878                 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
879                                 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
880                                 SOL_CID_MASK) >> 6)];
881                 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
882                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
883                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
884                 task = pwrb_handle->pio_handle;
885         }
886
887         io_task = task->dd_data;
888         spin_lock(&phba->mgmt_sgl_lock);
889         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
890         spin_unlock(&phba->mgmt_sgl_lock);
891         spin_lock_bh(&session->lock);
892         free_wrb_handle(phba, pwrb_context, pwrb_handle);
893         spin_unlock_bh(&session->lock);
894 }
895
896 static void
897 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
898                        struct iscsi_task *task, struct sol_cqe *psol)
899 {
900         struct iscsi_nopin *hdr;
901         struct iscsi_conn *conn = beiscsi_conn->conn;
902         struct beiscsi_io_task *io_task = task->dd_data;
903
904         hdr = (struct iscsi_nopin *)task->hdr;
905         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
906                         & SOL_FLAGS_MASK) >> 24) | 0x80;
907         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
908                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
909         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
910                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
911                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
912                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
913         hdr->opcode = ISCSI_OP_NOOP_IN;
914         hdr->itt = io_task->libiscsi_itt;
915         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
916 }
917
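/**
 * hwi_complete_cmd - Dispatch a solicited completion by WRB type
 * @beiscsi_conn: Connection the completion belongs to
 * @phba: The hba pointer
 * @psol: The solicited completion entry from the adapter
 */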
918 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
919                              struct beiscsi_hba *phba, struct sol_cqe *psol)
920 {
921         struct hwi_wrb_context *pwrb_context;
922         struct wrb_handle *pwrb_handle;
923         struct iscsi_wrb *pwrb = NULL;
924         struct hwi_controller *phwi_ctrlr;
925         struct iscsi_task *task;
926         struct sgl_handle *psgl_handle = NULL;
927         unsigned int type;
928         struct iscsi_conn *conn = beiscsi_conn->conn;
929         struct iscsi_session *session = conn->session;
930
931         phwi_ctrlr = phba->phwi_ctrlr;
932         if (ring_mode) {
933                 psgl_handle = phba->sgl_hndl_array[((psol->
934                               dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
935                               32] & SOL_ICD_INDEX_MASK) >> 6)];
936                 task = psgl_handle->task;
937                 type = psgl_handle->type;
938         } else {
939                 pwrb_context = &phwi_ctrlr->
940                                 wrb_context[((psol->dw[offsetof
941                                 (struct amap_sol_cqe, cid) / 32]
942                                 & SOL_CID_MASK) >> 6)];
943                 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
944                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
945                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
946                 task = pwrb_handle->pio_handle;
947                 pwrb = pwrb_handle->pwrb;
948                 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
949                          WRB_TYPE_MASK) >> 28;
950         }
951         spin_lock_bh(&session->lock);
952         switch (type) {
953         case HWH_TYPE_IO:
954         case HWH_TYPE_IO_RD:
955                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
956                     ISCSI_OP_NOOP_OUT) {
957                         be_complete_nopin_resp(beiscsi_conn, task, psol);
958                 } else
959                         be_complete_io(beiscsi_conn, task, psol);
960                 break;
961
962         case HWH_TYPE_LOGOUT:
963                 be_complete_logout(beiscsi_conn, task, psol);
964                 break;
965
966         case HWH_TYPE_LOGIN:
967                 SE_DEBUG(DBG_LVL_1,
968                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
969                          "- Solicited path \n");
970                 break;
971
972         case HWH_TYPE_TMF:
973                 be_complete_tmf(beiscsi_conn, task, psol);
974                 break;
975
976         case HWH_TYPE_NOP:
977                 be_complete_nopin_resp(beiscsi_conn, task, psol);
978                 break;
979
980         default:
981                 if (ring_mode)
982                         shost_printk(KERN_WARNING, phba->shost,
983                                 "In hwi_complete_cmd, unknown type = %d "
984                                 "icd_index 0x%x CID 0x%x\n", type,
985                                 ((psol->dw[offsetof(struct amap_sol_cqe_ring,
986                                 icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
987                                 psgl_handle->cid);
988                 else
989                         shost_printk(KERN_WARNING, phba->shost,
990                                 "In hwi_complete_cmd, unknown type = %d "
991                                 "wrb_index 0x%x CID 0x%x\n", type,
992                                 ((psol->dw[offsetof(struct amap_iscsi_wrb,
993                                 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
994                                 ((psol->dw[offsetof(struct amap_sol_cqe,
995                                 cid) / 32] & SOL_CID_MASK) >> 6));
996                 break;
997         }
998
999         spin_unlock_bh(&session->lock);
1000 }
1001
1002 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1003                                           *pasync_ctx, unsigned int is_header,
1004                                           unsigned int host_write_ptr)
1005 {
1006         if (is_header)
1007                 return &pasync_ctx->async_entry[host_write_ptr].
1008                     header_busy_list;
1009         else
1010                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1011 }
1012
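/**
 * hwi_get_async_handle - Find the async PDU handle for a default PDU CQE
 * @phba: The hba pointer
 * @beiscsi_conn: Connection the PDU arrived on
 * @pasync_ctx: Async PDU context of the controller
 * @pdpdu_cqe: The default PDU completion entry
 * @pcq_index: Returns the ring index carried in the CQE
 *
 * The buffer address in the CQE identifies the matching handle on the
 * header or data busy list.
 */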
1013 static struct async_pdu_handle *
1014 hwi_get_async_handle(struct beiscsi_hba *phba,
1015                      struct beiscsi_conn *beiscsi_conn,
1016                      struct hwi_async_pdu_context *pasync_ctx,
1017                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1018 {
1019         struct be_bus_address phys_addr;
1020         struct list_head *pbusy_list;
1021         struct async_pdu_handle *pasync_handle = NULL;
1022         int buffer_len = 0;
1023         unsigned char buffer_index = -1;
1024         unsigned char is_header = 0;
1025
1026         phys_addr.u.a32.address_lo =
1027             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1028             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1029                                                 & PDUCQE_DPL_MASK) >> 16);
1030         phys_addr.u.a32.address_hi =
1031             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1032
1033         phys_addr.u.a64.address =
1034                         *((unsigned long long *)(&phys_addr.u.a64.address));
1035
1036         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1037                         & PDUCQE_CODE_MASK) {
1038         case UNSOL_HDR_NOTIFY:
1039                 is_header = 1;
1040
1041                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1042                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1043                         index) / 32] & PDUCQE_INDEX_MASK));
1044
1045                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1046                                 pasync_ctx->async_header.pa_base.u.a64.address);
1047
1048                 buffer_index = buffer_len /
1049                                 pasync_ctx->async_header.buffer_size;
1050
1051                 break;
1052         case UNSOL_DATA_NOTIFY:
1053                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1054                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1055                                         index) / 32] & PDUCQE_INDEX_MASK));
1056                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1057                                         pasync_ctx->async_data.pa_base.u.
1058                                         a64.address);
1059                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1060                 break;
1061         default:
1062                 pbusy_list = NULL;
1063                 shost_printk(KERN_WARNING, phba->shost,
1064                         "Unexpected code=%d \n",
1065                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1066                                         code) / 32] & PDUCQE_CODE_MASK);
1067                 return NULL;
1068         }
1069
1070         WARN_ON(buffer_index > pasync_ctx->async_data.num_entries);
1071         WARN_ON(list_empty(pbusy_list));
1072         list_for_each_entry(pasync_handle, pbusy_list, link) {
1073                 WARN_ON(pasync_handle->consumed);
1074                 if (pasync_handle->index == buffer_index)
1075                         break;
1076         }
1077
1078         WARN_ON(!pasync_handle);
1079
1080         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid;
1081         pasync_handle->is_header = is_header;
1082         pasync_handle->buffer_len = ((pdpdu_cqe->
1083                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1084                         & PDUCQE_DPL_MASK) >> 16);
1085
1086         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1087                         index) / 32] & PDUCQE_INDEX_MASK);
1088         return pasync_handle;
1089 }
1090
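/**
 * hwi_update_async_writables - Advance the endpoint read pointer
 * @pasync_ctx: Async PDU context of the controller
 * @is_header: Operate on the header ring if set, otherwise the data ring
 * @cq_index: Ring index reported by the adapter
 *
 * Entries between the current read pointer and @cq_index are marked
 * consumed and accounted as writable so they can be reposted later.
 */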
1091 static unsigned int
1092 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1093                            unsigned int is_header, unsigned int cq_index)
1094 {
1095         struct list_head *pbusy_list;
1096         struct async_pdu_handle *pasync_handle;
1097         unsigned int num_entries, writables = 0;
1098         unsigned int *pep_read_ptr, *pwritables;
1099
1100
1101         if (is_header) {
1102                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1103                 pwritables = &pasync_ctx->async_header.writables;
1104                 num_entries = pasync_ctx->async_header.num_entries;
1105         } else {
1106                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1107                 pwritables = &pasync_ctx->async_data.writables;
1108                 num_entries = pasync_ctx->async_data.num_entries;
1109         }
1110
1111         while ((*pep_read_ptr) != cq_index) {
1112                 (*pep_read_ptr)++;
1113                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1114
1115                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1116                                                      *pep_read_ptr);
1117                 if (writables == 0)
1118                         WARN_ON(list_empty(pbusy_list));
1119
1120                 if (!list_empty(pbusy_list)) {
1121                         pasync_handle = list_entry(pbusy_list->next,
1122                                                    struct async_pdu_handle,
1123                                                    link);
1124                         WARN_ON(!pasync_handle);
1125                         pasync_handle->consumed = 1;
1126                 }
1127
1128                 writables++;
1129         }
1130
1131         if (!writables) {
1132                 SE_DEBUG(DBG_LVL_1,
1133                          "Duplicate notification received - index 0x%x!!\n",
1134                          cq_index);
1135                 WARN_ON(1);
1136         }
1137
1138         *pwritables = *pwritables + writables;
1139         return 0;
1140 }
1141
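/**
 * hwi_free_async_msg - Return a CRI's async handles to the free lists
 * @phba: The hba pointer
 * @cri: Connection resource index whose wait queue is drained
 *
 * The first handle on the wait queue is the header buffer, the rest are
 * data buffers.
 */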
1142 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1143                                        unsigned int cri)
1144 {
1145         struct hwi_controller *phwi_ctrlr;
1146         struct hwi_async_pdu_context *pasync_ctx;
1147         struct async_pdu_handle *pasync_handle, *tmp_handle;
1148         struct list_head *plist;
1149         unsigned int i = 0;
1150
1151         phwi_ctrlr = phba->phwi_ctrlr;
1152         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1153
1154         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1155
1156         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1157                 list_del(&pasync_handle->link);
1158
1159                 if (i == 0) {
1160                         list_add_tail(&pasync_handle->link,
1161                                       &pasync_ctx->async_header.free_list);
1162                         pasync_ctx->async_header.free_entries++;
1163                         i++;
1164                 } else {
1165                         list_add_tail(&pasync_handle->link,
1166                                       &pasync_ctx->async_data.free_list);
1167                         pasync_ctx->async_data.free_entries++;
1168                         i++;
1169                 }
1170         }
1171
1172         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1173         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1174         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1175         return 0;
1176 }
1177
1178 static struct phys_addr *
1179 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1180                      unsigned int is_header, unsigned int host_write_ptr)
1181 {
1182         struct phys_addr *pasync_sge = NULL;
1183
1184         if (is_header)
1185                 pasync_sge = pasync_ctx->async_header.ring_base;
1186         else
1187                 pasync_sge = pasync_ctx->async_data.ring_base;
1188
1189         return pasync_sge + host_write_ptr;
1190 }
1191
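/**
 * hwi_post_async_buffers - Replenish the default PDU header or data ring
 * @phba: The hba pointer
 * @is_header: Post to the header ring if set, otherwise the data ring
 *
 * Buffers are posted in multiples of eight and the RXULP doorbell is
 * rung with the number of entries handed back to the adapter.
 */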
1192 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1193                                    unsigned int is_header)
1194 {
1195         struct hwi_controller *phwi_ctrlr;
1196         struct hwi_async_pdu_context *pasync_ctx;
1197         struct async_pdu_handle *pasync_handle;
1198         struct list_head *pfree_link, *pbusy_list;
1199         struct phys_addr *pasync_sge;
1200         unsigned int ring_id, num_entries;
1201         unsigned int host_write_num;
1202         unsigned int writables;
1203         unsigned int i = 0;
1204         u32 doorbell = 0;
1205
1206         phwi_ctrlr = phba->phwi_ctrlr;
1207         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1208
1209         if (is_header) {
1210                 num_entries = pasync_ctx->async_header.num_entries;
1211                 writables = min(pasync_ctx->async_header.writables,
1212                                 pasync_ctx->async_header.free_entries);
1213                 pfree_link = pasync_ctx->async_header.free_list.next;
1214                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1215                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1216         } else {
1217                 num_entries = pasync_ctx->async_data.num_entries;
1218                 writables = min(pasync_ctx->async_data.writables,
1219                                 pasync_ctx->async_data.free_entries);
1220                 pfree_link = pasync_ctx->async_data.free_list.next;
1221                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1222                 ring_id = phwi_ctrlr->default_pdu_data.id;
1223         }
1224
1225         writables = (writables / 8) * 8;
1226         if (writables) {
1227                 for (i = 0; i < writables; i++) {
1228                         pbusy_list =
1229                             hwi_get_async_busy_list(pasync_ctx, is_header,
1230                                                     host_write_num);
1231                         pasync_handle =
1232                             list_entry(pfree_link, struct async_pdu_handle,
1233                                                                 link);
1234                         WARN_ON(!pasync_handle);
1235                         pasync_handle->consumed = 0;
1236
1237                         pfree_link = pfree_link->next;
1238
1239                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1240                                                 is_header, host_write_num);
1241
1242                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1243                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1244
1245                         list_move(&pasync_handle->link, pbusy_list);
1246
1247                         host_write_num++;
1248                         host_write_num = host_write_num % num_entries;
1249                 }
1250
1251                 if (is_header) {
1252                         pasync_ctx->async_header.host_write_ptr =
1253                                                         host_write_num;
1254                         pasync_ctx->async_header.free_entries -= writables;
1255                         pasync_ctx->async_header.writables -= writables;
1256                         pasync_ctx->async_header.busy_entries += writables;
1257                 } else {
1258                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1259                         pasync_ctx->async_data.free_entries -= writables;
1260                         pasync_ctx->async_data.writables -= writables;
1261                         pasync_ctx->async_data.busy_entries += writables;
1262                 }
1263
1264                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1265                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1266                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1267                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1268                                         << DB_DEF_PDU_CQPROC_SHIFT;
1269
1270                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1271         }
1272 }
1273
1274 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1275                                          struct beiscsi_conn *beiscsi_conn,
1276                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1277 {
1278         struct hwi_controller *phwi_ctrlr;
1279         struct hwi_async_pdu_context *pasync_ctx;
1280         struct async_pdu_handle *pasync_handle = NULL;
1281         unsigned int cq_index = -1;
1282
1283         phwi_ctrlr = phba->phwi_ctrlr;
1284         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1285
1286         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1287                                              pdpdu_cqe, &cq_index);
1288         BUG_ON(pasync_handle->is_header != 0);
1289         if (pasync_handle->consumed == 0)
1290                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1291                                            cq_index);
1292
1293         hwi_free_async_msg(phba, pasync_handle->cri);
1294         hwi_post_async_buffers(phba, pasync_handle->is_header);
1295 }
1296
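/*
 * hwi_fwd_async_msg - hand a fully received unsolicited PDU to the iSCSI
 * layer.  The first handle on the per-CRI wait queue carries the header;
 * any data fragments that follow are copied back-to-back into the first
 * data buffer before beiscsi_process_async_pdu() is called.
 */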
1297 static unsigned int
1298 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1299                   struct beiscsi_hba *phba,
1300                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1301 {
1302         struct list_head *plist;
1303         struct async_pdu_handle *pasync_handle;
1304         void *phdr = NULL;
1305         unsigned int hdr_len = 0, buf_len = 0;
1306         unsigned int status, index = 0, offset = 0;
1307         void *pfirst_buffer = NULL;
1308         unsigned int num_buf = 0;
1309
1310         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1311
1312         list_for_each_entry(pasync_handle, plist, link) {
1313                 if (index == 0) {
1314                         phdr = pasync_handle->pbuffer;
1315                         hdr_len = pasync_handle->buffer_len;
1316                 } else {
1317                         buf_len = pasync_handle->buffer_len;
1318                         if (!num_buf) {
1319                                 pfirst_buffer = pasync_handle->pbuffer;
1320                                 num_buf++;
1321                         }
1322                         memcpy(pfirst_buffer + offset,
1323                                pasync_handle->pbuffer, buf_len);
1324                         offset += buf_len;
1325                 }
1326                 index++;
1327         }
1328
1329         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1330                                            beiscsi_conn->beiscsi_conn_cid,
1331                                            phdr, hdr_len, pfirst_buffer,
1332                                            offset);
1333
1334         if (status == 0)
1335                 hwi_free_async_msg(phba, cri);
1336         return 0;
1337 }
1338
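/*
 * hwi_gather_async_pdu - queue the header and data fragments of an
 * unsolicited PDU on the per-CRI wait queue.  Once all data bytes
 * announced in the header have arrived (or the header announces none),
 * the assembled PDU is forwarded via hwi_fwd_async_msg().
 */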
1339 static unsigned int
1340 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1341                      struct beiscsi_hba *phba,
1342                      struct async_pdu_handle *pasync_handle)
1343 {
1344         struct hwi_async_pdu_context *pasync_ctx;
1345         struct hwi_controller *phwi_ctrlr;
1346         unsigned int bytes_needed = 0, status = 0;
1347         unsigned short cri = pasync_handle->cri;
1348         struct pdu_base *ppdu;
1349
1350         phwi_ctrlr = phba->phwi_ctrlr;
1351         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1352
1353         list_del(&pasync_handle->link);
1354         if (pasync_handle->is_header) {
1355                 pasync_ctx->async_header.busy_entries--;
1356                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1357                         hwi_free_async_msg(phba, cri);
1358                         BUG();
1359                 }
1360
1361                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1362                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1363                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1364                                 (unsigned short)pasync_handle->buffer_len;
1365                 list_add_tail(&pasync_handle->link,
1366                               &pasync_ctx->async_entry[cri].wait_queue.list);
1367
1368                 ppdu = pasync_handle->pbuffer;
1369                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1370                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1371                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1372                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1373                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1374
1375                 if (status == 0) {
1376                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1377                             bytes_needed;
1378
1379                         if (bytes_needed == 0)
1380                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1381                                                            pasync_ctx, cri);
1382                 }
1383         } else {
1384                 pasync_ctx->async_data.busy_entries--;
1385                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1386                         list_add_tail(&pasync_handle->link,
1387                                       &pasync_ctx->async_entry[cri].wait_queue.
1388                                       list);
1389                         pasync_ctx->async_entry[cri].wait_queue.
1390                                 bytes_received +=
1391                                 (unsigned short)pasync_handle->buffer_len;
1392
1393                         if (pasync_ctx->async_entry[cri].wait_queue.
1394                             bytes_received >=
1395                             pasync_ctx->async_entry[cri].wait_queue.
1396                             bytes_needed)
1397                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1398                                                            pasync_ctx, cri);
1399                 }
1400         }
1401         return status;
1402 }
1403
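/*
 * hwi_process_default_pdu_ring - handle an UNSOL_HDR/UNSOL_DATA CQE:
 * look up the async handle for the completion, update the writables
 * accounting if needed, gather the PDU and re-post buffers to the
 * default PDU ring.
 */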
1404 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1405                                          struct beiscsi_hba *phba,
1406                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1407 {
1408         struct hwi_controller *phwi_ctrlr;
1409         struct hwi_async_pdu_context *pasync_ctx;
1410         struct async_pdu_handle *pasync_handle = NULL;
1411         unsigned int cq_index = -1;
1412
1413         phwi_ctrlr = phba->phwi_ctrlr;
1414         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1415         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1416                                              pdpdu_cqe, &cq_index);
1417
1418         if (pasync_handle->consumed == 0)
1419                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1420                                            cq_index);
1421         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1422         hwi_post_async_buffers(phba, pasync_handle->is_header);
1423 }
1424
1425
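/*
 * beiscsi_process_cq - walk the completion queue attached to an EQ object
 * and dispatch every valid CQE: solicited command completions, driver
 * messages, unsolicited PDU notifications and the various error codes.
 * The CQ doorbell is rung every 32 entries and once more, with rearm, at
 * the end.  Returns the total number of CQEs processed.
 */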
1426 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1427 {
1428         struct be_queue_info *cq;
1429         struct sol_cqe *sol;
1430         struct dmsg_cqe *dmsg;
1431         unsigned int num_processed = 0;
1432         unsigned int tot_nump = 0;
1433         struct beiscsi_conn *beiscsi_conn;
1434         struct sgl_handle *psgl_handle = NULL;
1435         struct beiscsi_hba *phba;
1436
1437         cq = pbe_eq->cq;
1438         sol = queue_tail_node(cq);
1439         phba = pbe_eq->phba;
1440
1441         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1442                CQE_VALID_MASK) {
1443                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1444
1445                 if (ring_mode) {
1446                         psgl_handle = phba->sgl_hndl_array[((sol->
1447                                       dw[offsetof(struct amap_sol_cqe_ring,
1448                                       icd_index) / 32] & SOL_ICD_INDEX_MASK)
1449                                       >> 6)];
1450                         beiscsi_conn = phba->conn_table[psgl_handle->cid];
1451                         if (!beiscsi_conn || !beiscsi_conn->ep) {
1452                                 shost_printk(KERN_WARNING, phba->shost,
1453                                      "Connection table empty for cid = %d\n",
1454                                       psgl_handle->cid);
1455                                 return 0;
1456                         }
1457
1458                 } else {
1459                         beiscsi_conn = phba->conn_table[(u32) (sol->
1460                                  dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1461                                  SOL_CID_MASK) >> 6];
1462
1463                         if (!beiscsi_conn || !beiscsi_conn->ep) {
1464                                 shost_printk(KERN_WARNING, phba->shost,
1465                                      "Connection table empty for cid = %d\n",
1466                                      (u32)(sol->dw[offsetof(struct amap_sol_cqe,
1467                                      cid) / 32] & SOL_CID_MASK) >> 6);
1468                                 return 0;
1469                         }
1470                 }
1471
1472                 if (num_processed >= 32) {
1473                         hwi_ring_cq_db(phba, cq->id,
1474                                         num_processed, 0, 0);
1475                         tot_nump += num_processed;
1476                         num_processed = 0;
1477                 }
1478
1479                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1480                         32] & CQE_CODE_MASK) {
1481                 case SOL_CMD_COMPLETE:
1482                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1483                         break;
1484                 case DRIVERMSG_NOTIFY:
1485                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
1486                         dmsg = (struct dmsg_cqe *)sol;
1487                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1488                         break;
1489                 case UNSOL_HDR_NOTIFY:
1490                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1491                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1492                                              (struct i_t_dpdu_cqe *)sol);
1493                         break;
1494                 case UNSOL_DATA_NOTIFY:
1495                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1496                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1497                                              (struct i_t_dpdu_cqe *)sol);
1498                         break;
1499                 case CXN_INVALIDATE_INDEX_NOTIFY:
1500                 case CMD_INVALIDATED_NOTIFY:
1501                 case CXN_INVALIDATE_NOTIFY:
1502                         SE_DEBUG(DBG_LVL_1,
1503                                  "Ignoring CQ Error notification for cmd/cxn "
1504                                  "invalidate\n");
1505                         break;
1506                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1507                 case CMD_KILLED_INVALID_STATSN_RCVD:
1508                 case CMD_KILLED_INVALID_R2T_RCVD:
1509                 case CMD_CXN_KILLED_LUN_INVALID:
1510                 case CMD_CXN_KILLED_ICD_INVALID:
1511                 case CMD_CXN_KILLED_ITT_INVALID:
1512                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1513                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1514                         if (ring_mode) {
1515                                 SE_DEBUG(DBG_LVL_1,
1516                                  "CQ Error notification for cmd.. "
1517                                  "code %d cid 0x%x\n",
1518                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1519                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1520                         } else {
1521                                 SE_DEBUG(DBG_LVL_1,
1522                                  "CQ Error notification for cmd.. "
1523                                  "code %d cid 0x%x\n",
1524                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1525                                  32] & CQE_CODE_MASK,
1526                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1527                                  32] & SOL_CID_MASK));
1528                         }
1529                         break;
1530                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1531                         SE_DEBUG(DBG_LVL_1,
1532                                  "Digest error on def pdu ring, dropping..\n");
1533                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1534                                              (struct i_t_dpdu_cqe *) sol);
1535                         break;
1536                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1537                 case CXN_KILLED_BURST_LEN_MISMATCH:
1538                 case CXN_KILLED_AHS_RCVD:
1539                 case CXN_KILLED_HDR_DIGEST_ERR:
1540                 case CXN_KILLED_UNKNOWN_HDR:
1541                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1542                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1543                 case CXN_KILLED_TIMED_OUT:
1544                 case CXN_KILLED_FIN_RCVD:
1545                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1546                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1547                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1548                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1549                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1550                         if (ring_mode) {
1551                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1552                                  "0x%x...\n",
1553                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1554                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1555                         } else {
1556                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1557                                  "0x%x...\n",
1558                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1559                                  32] & CQE_CODE_MASK,
1560                                  sol->dw[offsetof(struct amap_sol_cqe, cid) /
1561                                  32] & CQE_CID_MASK);
1562                         }
1563                         iscsi_conn_failure(beiscsi_conn->conn,
1564                                            ISCSI_ERR_CONN_FAILED);
1565                         break;
1566                 case CXN_KILLED_RST_SENT:
1567                 case CXN_KILLED_RST_RCVD:
1568                         if (ring_mode) {
1569                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1570                                 "received/sent on CID 0x%x...\n",
1571                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1572                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1573                         } else {
1574                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1575                                 "received/sent on CID 0x%x...\n",
1576                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1577                                  32] & CQE_CODE_MASK,
1578                                  sol->dw[offsetof(struct amap_sol_cqe, cid) /
1579                                  32] & CQE_CID_MASK);
1580                         }
1581                         iscsi_conn_failure(beiscsi_conn->conn,
1582                                            ISCSI_ERR_CONN_FAILED);
1583                         break;
1584                 default:
1585                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1586                                  "received on CID 0x%x...\n",
1587                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1588                                  32] & CQE_CODE_MASK,
1589                                  sol->dw[offsetof(struct amap_sol_cqe, cid) /
1590                                  32] & CQE_CID_MASK);
1591                         break;
1592                 }
1593
1594                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1595                 queue_tail_inc(cq);
1596                 sol = queue_tail_node(cq);
1597                 num_processed++;
1598         }
1599
1600         if (num_processed > 0) {
1601                 tot_nump += num_processed;
1602                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1603         }
1604         return tot_nump;
1605 }
1606
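/*
 * beiscsi_process_all_cqs - workqueue handler scheduled from the interrupt
 * path.  Clears the todo flags under isr_lock and processes the CQ of
 * either the extra (MCC) event queue when MSI-X is enabled or of event
 * queue 0 otherwise.
 */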
1607 static void beiscsi_process_all_cqs(struct work_struct *work)
1608 {
1609         unsigned long flags;
1610         struct hwi_controller *phwi_ctrlr;
1611         struct hwi_context_memory *phwi_context;
1612         struct be_eq_obj *pbe_eq;
1613         struct beiscsi_hba *phba =
1614             container_of(work, struct beiscsi_hba, work_cqs);
1615
1616         phwi_ctrlr = phba->phwi_ctrlr;
1617         phwi_context = phwi_ctrlr->phwi_ctxt;
1618         if (phba->msix_enabled)
1619                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1620         else
1621                 pbe_eq = &phwi_context->be_eq[0];
1622
1623         if (phba->todo_mcc_cq) {
1624                 spin_lock_irqsave(&phba->isr_lock, flags);
1625                 phba->todo_mcc_cq = 0;
1626                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1627         }
1628
1629         if (phba->todo_cq) {
1630                 spin_lock_irqsave(&phba->isr_lock, flags);
1631                 phba->todo_cq = 0;
1632                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1633                 beiscsi_process_cq(pbe_eq);
1634         }
1635 }
1636
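/*
 * be_iopoll - blk_iopoll callback.  Processes the CQ belonging to this EQ
 * object and, if fewer entries than the budget were consumed, completes
 * the poll and re-arms the event queue.
 */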
1637 static int be_iopoll(struct blk_iopoll *iop, int budget)
1638 {
1639         unsigned int ret;
1640         struct beiscsi_hba *phba;
1641         struct be_eq_obj *pbe_eq;
1642
1643         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1644         ret = beiscsi_process_cq(pbe_eq);
1645         if (ret < budget) {
1646                 phba = pbe_eq->phba;
1647                 blk_iopoll_complete(iop);
1648                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1649                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1650         }
1651         return ret;
1652 }
1653
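/*
 * hwi_write_sgl - program the WRB with the iSCSI BHS address and the
 * first two data fragments, then build the full SGL (one iscsi_sge per
 * scatterlist element, preceded by the BHS entry) in the ICD fragment
 * backing this task.
 */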
1654 static void
1655 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1656               unsigned int num_sg, struct beiscsi_io_task *io_task)
1657 {
1658         struct iscsi_sge *psgl;
1659         unsigned short sg_len, index;
1660         unsigned int sge_len = 0;
1661         unsigned long long addr;
1662         struct scatterlist *l_sg;
1663         unsigned int offset;
1664
1665         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1666                                       io_task->bhs_pa.u.a32.address_lo);
1667         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1668                                       io_task->bhs_pa.u.a32.address_hi);
1669
1670         l_sg = sg;
1671         for (index = 0; (index < num_sg) && (index < 2); index++, sg = sg_next(sg)) {
1672                 if (index == 0) {
1673                         sg_len = sg_dma_len(sg);
1674                         addr = (u64) sg_dma_address(sg);
1675                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1676                                                         (addr & 0xFFFFFFFF));
1677                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1678                                                         (addr >> 32));
1679                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1680                                                         sg_len);
1681                         sge_len = sg_len;
1682                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1683                                                         1);
1684                 } else {
1685                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1686                                                         0);
1687                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1688                                                         pwrb, sge_len);
1689                         sg_len = sg_dma_len(sg);
1690                         addr = (u64) sg_dma_address(sg);
1691                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1692                                                         (addr & 0xFFFFFFFF));
1693                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1694                                                         (addr >> 32));
1695                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1696                                                         sg_len);
1697                 }
1698         }
1699         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1700         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1701
1702         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1703
1704         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1705                         io_task->bhs_pa.u.a32.address_hi);
1706         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1707                         io_task->bhs_pa.u.a32.address_lo);
1708
1709         if (num_sg == 2)
1710                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
1711         sg = l_sg;
1712         psgl++;
1713         psgl++;
1714         offset = 0;
1715         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1716                 sg_len = sg_dma_len(sg);
1717                 addr = (u64) sg_dma_address(sg);
1718                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1719                                                 (addr & 0xFFFFFFFF));
1720                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1721                                                 (addr >> 32));
1722                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1723                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1724                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1725                 offset += sg_len;
1726         }
1727         psgl--;
1728         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1729 }
1730
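/*
 * hwi_write_buffer - set up the WRB and SGL for a non-I/O (management)
 * task.  Immediate data in task->data, if any, is DMA-mapped and
 * described by SGE0; the SGL always carries the BHS as its first entry.
 */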
1731 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1732 {
1733         struct iscsi_sge *psgl;
1734         unsigned long long addr;
1735         struct beiscsi_io_task *io_task = task->dd_data;
1736         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1737         struct beiscsi_hba *phba = beiscsi_conn->phba;
1738
1739         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1740         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1741                                 io_task->bhs_pa.u.a32.address_lo);
1742         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1743                                 io_task->bhs_pa.u.a32.address_hi);
1744
1745         if (task->data) {
1746                 if (task->data_count) {
1747                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1748                         addr = (u64) pci_map_single(phba->pcidev,
1749                                                     task->data,
1750                                                     task->data_count, PCI_DMA_TODEVICE);
1751                 } else {
1752                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1753                         addr = 0;
1754                 }
1755                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1756                                                 (addr & 0xFFFFFFFF));
1757                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1758                                                 (addr >> 32));
1759                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1760                                                 task->data_count);
1761
1762                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1763         } else {
1764                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1765                 addr = 0;
1766         }
1767
1768         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1769
1770         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1771
1772         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1773                       io_task->bhs_pa.u.a32.address_hi);
1774         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1775                       io_task->bhs_pa.u.a32.address_lo);
1776         if (task->data) {
1777                 psgl++;
1778                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1779                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1780                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1781                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1782                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1783                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1784
1785                 psgl++;
1786                 if (task->data) {
1787                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1788                                                 (addr & 0xFFFFFFFF));
1789                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1790                                                 (addr >> 32));
1791                 }
1792                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1793         }
1794         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1795 }
1796
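/*
 * beiscsi_find_mem_req - work out how many bytes each memory region needs
 * (WRBs and their handles, SGL handles and SGEs, async PDU buffers, rings,
 * handles and context) and record the sizes in phba->mem_req[].
 */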
1797 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1798 {
1799         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1800         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1801         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1802
1803         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1804                                       sizeof(struct sol_cqe));
1805         num_async_pdu_buf_pages =
1806                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1807                                        phba->params.defpdu_hdr_sz);
1808         num_async_pdu_buf_sgl_pages =
1809                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1810                                        sizeof(struct phys_addr));
1811         num_async_pdu_data_pages =
1812                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1813                                        phba->params.defpdu_data_sz);
1814         num_async_pdu_data_sgl_pages =
1815                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1816                                        sizeof(struct phys_addr));
1817
1818         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1819
1820         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1821                                                  BE_ISCSI_PDU_HEADER_SIZE;
1822         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1823                                             sizeof(struct hwi_context_memory);
1824
1825
1826         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1827             * (phba->params.wrbs_per_cxn)
1828             * phba->params.cxns_per_ctrl;
1829         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1830                                  (phba->params.wrbs_per_cxn);
1831         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1832                                 phba->params.cxns_per_ctrl);
1833
1834         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1835                 phba->params.icds_per_ctrl;
1836         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1837                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1838
1839         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1840                 num_async_pdu_buf_pages * PAGE_SIZE;
1841         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1842                 num_async_pdu_data_pages * PAGE_SIZE;
1843         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1844                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1845         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1846                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1847         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1848                 phba->params.asyncpdus_per_ctrl *
1849                 sizeof(struct async_pdu_handle);
1850         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1851                 phba->params.asyncpdus_per_ctrl *
1852                 sizeof(struct async_pdu_handle);
1853         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1854                 sizeof(struct hwi_async_pdu_context) +
1855                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1856 }
1857
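/*
 * beiscsi_alloc_mem - allocate every region recorded in phba->mem_req[].
 * A region may be split across up to BEISCSI_MAX_FRAGS_INIT physically
 * contiguous chunks; when an allocation fails the chunk size is reduced
 * (rounded down to a power of two, or halved) until BE_MIN_MEM_SIZE is
 * reached.
 */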
1858 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1859 {
1860         struct be_mem_descriptor *mem_descr;
1861         dma_addr_t bus_add;
1862         struct mem_array *mem_arr, *mem_arr_orig;
1863         unsigned int i, j, alloc_size, curr_alloc_size;
1864
1865         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1866         if (!phba->phwi_ctrlr)
1867                 return -ENOMEM;
1868
1869         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1870                                  GFP_KERNEL);
1871         if (!phba->init_mem) {
1872                 kfree(phba->phwi_ctrlr);
1873                 return -ENOMEM;
1874         }
1875
1876         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1877                                GFP_KERNEL);
1878         if (!mem_arr_orig) {
1879                 kfree(phba->init_mem);
1880                 kfree(phba->phwi_ctrlr);
1881                 return -ENOMEM;
1882         }
1883
1884         mem_descr = phba->init_mem;
1885         for (i = 0; i < SE_MEM_MAX; i++) {
1886                 j = 0;
1887                 mem_arr = mem_arr_orig;
1888                 alloc_size = phba->mem_req[i];
1889                 memset(mem_arr, 0, sizeof(struct mem_array) *
1890                        BEISCSI_MAX_FRAGS_INIT);
1891                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
1892                 do {
1893                         mem_arr->virtual_address = pci_alloc_consistent(
1894                                                         phba->pcidev,
1895                                                         curr_alloc_size,
1896                                                         &bus_add);
1897                         if (!mem_arr->virtual_address) {
1898                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
1899                                         goto free_mem;
1900                                 if (curr_alloc_size -
1901                                         rounddown_pow_of_two(curr_alloc_size))
1902                                         curr_alloc_size = rounddown_pow_of_two
1903                                                              (curr_alloc_size);
1904                                 else
1905                                         curr_alloc_size = curr_alloc_size / 2;
1906                         } else {
1907                                 mem_arr->bus_address.u.
1908                                     a64.address = (__u64) bus_add;
1909                                 mem_arr->size = curr_alloc_size;
1910                                 alloc_size -= curr_alloc_size;
1911                                 curr_alloc_size = min(be_max_phys_size *
1912                                                       1024, alloc_size);
1913                                 j++;
1914                                 mem_arr++;
1915                         }
1916                 } while (alloc_size);
1917                 mem_descr->num_elements = j;
1918                 mem_descr->size_in_bytes = phba->mem_req[i];
1919                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
1920                                                GFP_KERNEL);
1921                 if (!mem_descr->mem_array)
1922                         goto free_mem;
1923
1924                 memcpy(mem_descr->mem_array, mem_arr_orig,
1925                        sizeof(struct mem_array) * j);
1926                 mem_descr++;
1927         }
1928         kfree(mem_arr_orig);
1929         return 0;
1930 free_mem:
1931         mem_descr->num_elements = j;
1932         while ((i) || (j)) {
1933                 for (j = mem_descr->num_elements; j > 0; j--) {
1934                         pci_free_consistent(phba->pcidev,
1935                                             mem_descr->mem_array[j - 1].size,
1936                                             mem_descr->mem_array[j - 1].
1937                                             virtual_address,
1938                                             mem_descr->mem_array[j - 1].
1939                                             bus_address.u.a64.address);
1940                 }
1941                 if (i) {
1942                         i--;
1943                         kfree(mem_descr->mem_array);
1944                         mem_descr--;
1945                 }
1946         }
1947         kfree(mem_arr_orig);
1948         kfree(phba->init_mem);
1949         kfree(phba->phwi_ctrlr);
1950         return -ENOMEM;
1951 }
1952
1953 static int beiscsi_get_memory(struct beiscsi_hba *phba)
1954 {
1955         beiscsi_find_mem_req(phba);
1956         return beiscsi_alloc_mem(phba);
1957 }
1958
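/*
 * iscsi_init_global_templates - build the two global PDU header templates
 * (a Data-Out header and a NOP-Out header with TTT 0xFFFFFFFF) in the
 * ISCSI_MEM_GLOBAL_HEADER region.
 */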
1959 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1960 {
1961         struct pdu_data_out *pdata_out;
1962         struct pdu_nop_out *pnop_out;
1963         struct be_mem_descriptor *mem_descr;
1964
1965         mem_descr = phba->init_mem;
1966         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1967         pdata_out =
1968             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1969         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1970
1971         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1972                       IIOC_SCSI_DATA);
1973
1974         pnop_out =
1975             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1976                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1977
1978         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1979         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1980         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1981         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
1982 }
1983
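/*
 * beiscsi_init_wrb_handle - carve the HWI_MEM_WRBH and HWI_MEM_WRB regions
 * into per-connection WRB handle arrays and WRB rings, and point every
 * wrb_handle at its iscsi_wrb.
 */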
1984 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1985 {
1986         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
1987         struct wrb_handle *pwrb_handle;
1988         struct hwi_controller *phwi_ctrlr;
1989         struct hwi_wrb_context *pwrb_context;
1990         struct iscsi_wrb *pwrb;
1991         unsigned int num_cxn_wrbh;
1992         unsigned int num_cxn_wrb, j, idx, index;
1993
1994         mem_descr_wrbh = phba->init_mem;
1995         mem_descr_wrbh += HWI_MEM_WRBH;
1996
1997         mem_descr_wrb = phba->init_mem;
1998         mem_descr_wrb += HWI_MEM_WRB;
1999
2000         idx = 0;
2001         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2002         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2003                         ((sizeof(struct wrb_handle)) *
2004                          phba->params.wrbs_per_cxn));
2005         phwi_ctrlr = phba->phwi_ctrlr;
2006
2007         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2008                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2009                 pwrb_context->pwrb_handle_base =
2010                                 kzalloc(sizeof(struct wrb_handle *) *
2011                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2012                 pwrb_context->pwrb_handle_basestd =
2013                                 kzalloc(sizeof(struct wrb_handle *) *
2014                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2015                 if (num_cxn_wrbh) {
2016                         pwrb_context->alloc_index = 0;
2017                         pwrb_context->wrb_handles_available = 0;
2018                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2019                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2020                                 pwrb_context->pwrb_handle_basestd[j] =
2021                                                                 pwrb_handle;
2022                                 pwrb_context->wrb_handles_available++;
2023                                 pwrb_handle->wrb_index = j;
2024                                 pwrb_handle++;
2025                         }
2026                         pwrb_context->free_index = 0;
2027                         num_cxn_wrbh--;
2028                 } else {
2029                         idx++;
2030                         pwrb_handle =
2031                             mem_descr_wrbh->mem_array[idx].virtual_address;
2032                         num_cxn_wrbh =
2033                             ((mem_descr_wrbh->mem_array[idx].size) /
2034                              ((sizeof(struct wrb_handle)) *
2035                               phba->params.wrbs_per_cxn));
2036                         pwrb_context->alloc_index = 0;
                        pwrb_context->wrb_handles_available = 0;
2037                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2038                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2039                                 pwrb_context->pwrb_handle_basestd[j] =
2040                                     pwrb_handle;
2041                                 pwrb_context->wrb_handles_available++;
2042                                 pwrb_handle->wrb_index = j;
2043                                 pwrb_handle++;
2044                         }
2045                         pwrb_context->free_index = 0;
2046                         num_cxn_wrbh--;
2047                 }
2048         }
2049         idx = 0;
2050         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2051         num_cxn_wrb =
2052             ((mem_descr_wrb->mem_array[idx].size) /
2053              ((sizeof(struct iscsi_wrb)) * phba->params.wrbs_per_cxn));
2054
2055         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2056                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2057                 if (num_cxn_wrb) {
2058                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2059                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2060                                 pwrb_handle->pwrb = pwrb;
2061                                 pwrb++;
2062                         }
2063                         num_cxn_wrb--;
2064                 } else {
2065                         idx++;
2066                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2067                         num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
2068                                         ((sizeof(struct iscsi_wrb)) *
2069                                          phba->params.wrbs_per_cxn));
2070                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2071                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2072                                 pwrb_handle->pwrb = pwrb;
2073                                 pwrb++;
2074                         }
2075                         num_cxn_wrb--;
2076                 }
2077         }
2078 }
2079
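/*
 * hwi_init_async_pdu_ctx - initialize the default (unsolicited) PDU
 * context: header and data buffer pools, rings and handle arrays, with
 * every handle placed on the corresponding free list.
 */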
2080 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2081 {
2082         struct hwi_controller *phwi_ctrlr;
2083         struct hba_parameters *p = &phba->params;
2084         struct hwi_async_pdu_context *pasync_ctx;
2085         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2086         unsigned int index;
2087         struct be_mem_descriptor *mem_descr;
2088
2089         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2090         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2091
2092         phwi_ctrlr = phba->phwi_ctrlr;
2093         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2094                                 mem_descr->mem_array[0].virtual_address;
2095         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2096         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2097
2098         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2099         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2100         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2101         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2102
2103         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2104         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2105         if (mem_descr->mem_array[0].virtual_address) {
2106                 SE_DEBUG(DBG_LVL_8,
2107                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2108                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2109         } else
2110                 shost_printk(KERN_WARNING, phba->shost,
2111                              "No Virtual address \n");
2112
2113         pasync_ctx->async_header.va_base =
2114                         mem_descr->mem_array[0].virtual_address;
2115
2116         pasync_ctx->async_header.pa_base.u.a64.address =
2117                         mem_descr->mem_array[0].bus_address.u.a64.address;
2118
2119         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2120         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2121         if (mem_descr->mem_array[0].virtual_address) {
2122                 SE_DEBUG(DBG_LVL_8,
2123                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2124                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2125         } else
2126                 shost_printk(KERN_WARNING, phba->shost,
2127                             "No Virtual address \n");
2128         pasync_ctx->async_header.ring_base =
2129                         mem_descr->mem_array[0].virtual_address;
2130
2131         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2132         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2133         if (mem_descr->mem_array[0].virtual_address) {
2134                 SE_DEBUG(DBG_LVL_8,
2135                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2136                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2137         } else
2138                 shost_printk(KERN_WARNING, phba->shost,
2139                             "No Virtual address \n");
2140
2141         pasync_ctx->async_header.handle_base =
2142                         mem_descr->mem_array[0].virtual_address;
2143         pasync_ctx->async_header.writables = 0;
2144         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2145
2146         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2147         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2148         if (mem_descr->mem_array[0].virtual_address) {
2149                 SE_DEBUG(DBG_LVL_8,
2150                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2151                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2152         } else
2153                 shost_printk(KERN_WARNING, phba->shost,
2154                             "No Virtual address \n");
2155         pasync_ctx->async_data.va_base =
2156                         mem_descr->mem_array[0].virtual_address;
2157         pasync_ctx->async_data.pa_base.u.a64.address =
2158                         mem_descr->mem_array[0].bus_address.u.a64.address;
2159
2160         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2161         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2162         if (mem_descr->mem_array[0].virtual_address) {
2163                 SE_DEBUG(DBG_LVL_8,
2164                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2165                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2166         } else
2167                 shost_printk(KERN_WARNING, phba->shost,
2168                              "No Virtual address \n");
2169
2170         pasync_ctx->async_data.ring_base =
2171                         mem_descr->mem_array[0].virtual_address;
2172
2173         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2174         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2175         if (!mem_descr->mem_array[0].virtual_address)
2176                 shost_printk(KERN_WARNING, phba->shost,
2177                             "No Virtual address \n");
2178
2179         pasync_ctx->async_data.handle_base =
2180                         mem_descr->mem_array[0].virtual_address;
2181         pasync_ctx->async_data.writables = 0;
2182         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2183
2184         pasync_header_h =
2185                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2186         pasync_data_h =
2187                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2188
2189         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2190                 pasync_header_h->cri = -1;
2191                 pasync_header_h->index = (char)index;
2192                 INIT_LIST_HEAD(&pasync_header_h->link);
2193                 pasync_header_h->pbuffer =
2194                         (void *)((unsigned long)
2195                         (pasync_ctx->async_header.va_base) +
2196                         (p->defpdu_hdr_sz * index));
2197
2198                 pasync_header_h->pa.u.a64.address =
2199                         pasync_ctx->async_header.pa_base.u.a64.address +
2200                         (p->defpdu_hdr_sz * index);
2201
2202                 list_add_tail(&pasync_header_h->link,
2203                                 &pasync_ctx->async_header.free_list);
2204                 pasync_header_h++;
2205                 pasync_ctx->async_header.free_entries++;
2206                 pasync_ctx->async_header.writables++;
2207
2208                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2209                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2210                                header_busy_list);
2211                 pasync_data_h->cri = -1;
2212                 pasync_data_h->index = (char)index;
2213                 INIT_LIST_HEAD(&pasync_data_h->link);
2214                 pasync_data_h->pbuffer =
2215                         (void *)((unsigned long)
2216                         (pasync_ctx->async_data.va_base) +
2217                         (p->defpdu_data_sz * index));
2218
2219                 pasync_data_h->pa.u.a64.address =
2220                     pasync_ctx->async_data.pa_base.u.a64.address +
2221                     (p->defpdu_data_sz * index);
2222
2223                 list_add_tail(&pasync_data_h->link,
2224                               &pasync_ctx->async_data.free_list);
2225                 pasync_data_h++;
2226                 pasync_ctx->async_data.free_entries++;
2227                 pasync_ctx->async_data.writables++;
2228
2229                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2230         }
2231
2232         pasync_ctx->async_header.host_write_ptr = 0;
2233         pasync_ctx->async_header.ep_read_ptr = -1;
2234         pasync_ctx->async_data.host_write_ptr = 0;
2235         pasync_ctx->async_data.ep_read_ptr = -1;
2236 }
2237
2238 static int
2239 be_sgl_create_contiguous(void *virtual_address,
2240                          u64 physical_address, u32 length,
2241                          struct be_dma_mem *sgl)
2242 {
2243         WARN_ON(!virtual_address);
2244         WARN_ON(!physical_address);
2245         WARN_ON(length == 0);
2246         WARN_ON(!sgl);
2247
2248         sgl->va = virtual_address;
2249         sgl->dma = physical_address;
2250         sgl->size = length;
2251
2252         return 0;
2253 }
2254
2255 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2256 {
2257         memset(sgl, 0, sizeof(*sgl));
2258 }
2259
2260 static void
2261 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2262                      struct mem_array *pmem, struct be_dma_mem *sgl)
2263 {
2264         if (sgl->va)
2265                 be_sgl_destroy_contiguous(sgl);
2266
2267         be_sgl_create_contiguous(pmem->virtual_address,
2268                                  pmem->bus_address.u.a64.address,
2269                                  pmem->size, sgl);
2270 }
2271
2272 static void
2273 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2274                            struct mem_array *pmem, struct be_dma_mem *sgl)
2275 {
2276         if (sgl->va)
2277                 be_sgl_destroy_contiguous(sgl);
2278
2279         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2280                                  pmem->bus_address.u.a64.address,
2281                                  pmem->size, sgl);
2282 }
2283
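/*
 * be_fill_queue - initialize a be_queue_info with its length, entry size
 * and previously allocated backing memory, and zero the ring.
 */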
2284 static int be_fill_queue(struct be_queue_info *q,
2285                 u16 len, u16 entry_size, void *vaddress)
2286 {
2287         struct be_dma_mem *mem = &q->dma_mem;
2288
2289         memset(q, 0, sizeof(*q));
2290         q->len = len;
2291         q->entry_size = entry_size;
2292         mem->size = len * entry_size;
2293         mem->va = vaddress;
2294         if (!mem->va)
2295                 return -ENOMEM;
2296         memset(mem->va, 0, mem->size);
2297         return 0;
2298 }
2299
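/*
 * beiscsi_create_eqs - allocate and create one event queue per CPU, plus
 * one for the MCC ring when MSI-X is enabled.  On failure, any EQ memory
 * allocated so far is freed again.
 */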
2300 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2301                              struct hwi_context_memory *phwi_context)
2302 {
2303         unsigned int i, num_eq_pages;
2304         int ret, eq_for_mcc;
2305         struct be_queue_info *eq;
2306         struct be_dma_mem *mem;
2307         void *eq_vaddress;
2308         dma_addr_t paddr;
2309
2310         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2311                                       sizeof(struct be_eq_entry));
2312
2313         if (phba->msix_enabled)
2314                 eq_for_mcc = 1;
2315         else
2316                 eq_for_mcc = 0;
2317         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2318                 eq = &phwi_context->be_eq[i].q;
2319                 mem = &eq->dma_mem;
2320                 phwi_context->be_eq[i].phba = phba;
2321                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2322                                                      num_eq_pages * PAGE_SIZE,
2323                                                      &paddr);
2324                 if (!eq_vaddress)
2325                         goto create_eq_error;
2326
2327                 mem->va = eq_vaddress;
2328                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2329                                     sizeof(struct be_eq_entry), eq_vaddress);
2330                 if (ret) {
2331                         shost_printk(KERN_ERR, phba->shost,
2332                                      "be_fill_queue Failed for EQ \n");
2333                         goto create_eq_error;
2334                 }
2335
2336                 mem->dma = paddr;
2337                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2338                                             phwi_context->cur_eqd);
2339                 if (ret) {
2340                         shost_printk(KERN_ERR, phba->shost,
2341                                      "beiscsi_cmd_eq_create "
2342                                      "Failed for EQ\n");
2343                         goto create_eq_error;
2344                 }
2345                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2346         }
2347         return 0;
2348 create_eq_error:
2349         for (i = 0; i < (phba->num_cpus + 1); i++) {
2350                 eq = &phwi_context->be_eq[i].q;
2351                 mem = &eq->dma_mem;
2352                 if (mem->va)
2353                         pci_free_consistent(phba->pcidev, num_eq_pages
2354                                             * PAGE_SIZE,
2355                                             mem->va, mem->dma);
2356         }
2357         return ret;
2358 }
2359
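/*
 * beiscsi_create_cqs - allocate and create one iSCSI completion queue per
 * CPU and attach each CQ to its event queue object.
 */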
2360 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2361                              struct hwi_context_memory *phwi_context)
2362 {
2363         unsigned int i, num_cq_pages;
2364         int ret;
2365         struct be_queue_info *cq, *eq;
2366         struct be_dma_mem *mem;
2367         struct be_eq_obj *pbe_eq;
2368         void *cq_vaddress;
2369         dma_addr_t paddr;
2370
2371         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2372                                       sizeof(struct sol_cqe));
2373
2374         for (i = 0; i < phba->num_cpus; i++) {
2375                 cq = &phwi_context->be_cq[i];
2376                 eq = &phwi_context->be_eq[i].q;
2377                 pbe_eq = &phwi_context->be_eq[i];
2378                 pbe_eq->cq = cq;
2379                 pbe_eq->phba = phba;
2380                 mem = &cq->dma_mem;
2381                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2382                                                      num_cq_pages * PAGE_SIZE,
2383                                                      &paddr);
2384                 if (!cq_vaddress)
2385                         goto create_cq_error;
2386                 ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
2387                                     sizeof(struct sol_cqe), cq_vaddress);
2388                 if (ret) {
2389                         shost_printk(KERN_ERR, phba->shost,
2390                                      "be_fill_queue Failed for ISCSI CQ \n");
2391                         goto create_cq_error;
2392                 }
2393
2394                 mem->dma = paddr;
2395                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2396                                             false, 0);
2397                 if (ret) {
2398                         shost_printk(KERN_ERR, phba->shost,
2399                                      "beiscsi_cmd_cq_create "
2400                                      "Failed for ISCSI CQ\n");
2401                         goto create_cq_error;
2402                 }
2403                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2404                                                  cq->id, eq->id);
2405                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2406         }
2407         return 0;
2408
2409 create_cq_error:
2410         for (i = 0; i < phba->num_cpus; i++) {
2411                 cq = &phwi_context->be_cq[i];
2412                 mem = &cq->dma_mem;
2413                 if (mem->va)
2414                         pci_free_consistent(phba->pcidev, num_cq_pages
2415                                             * PAGE_SIZE,
2416                                             mem->va, mem->dma);
2417         }
2418         return ret;
2419
2420 }
2421
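/*
 * beiscsi_create_def_hdr - create the default PDU header ring on top of
 * the HWI_MEM_ASYNC_HEADER_RING region and post the initial set of header
 * buffers to it.
 */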
2422 static int
2423 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2424                        struct hwi_context_memory *phwi_context,
2425                        struct hwi_controller *phwi_ctrlr,
2426                        unsigned int def_pdu_ring_sz)
2427 {
2428         unsigned int idx;
2429         int ret;
2430         struct be_queue_info *dq, *cq;
2431         struct be_dma_mem *mem;
2432         struct be_mem_descriptor *mem_descr;
2433         void *dq_vaddress;
2434
2435         idx = 0;
2436         dq = &phwi_context->be_def_hdrq;
2437         cq = &phwi_context->be_cq[0];
2438         mem = &dq->dma_mem;
2439         mem_descr = phba->init_mem;
2440         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2441         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2442         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2443                             sizeof(struct phys_addr),
2444                             sizeof(struct phys_addr), dq_vaddress);
2445         if (ret) {
2446                 shost_printk(KERN_ERR, phba->shost,
2447                              "be_fill_queue Failed for DEF PDU HDR\n");
2448                 return ret;
2449         }
2450         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2451         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2452                                               def_pdu_ring_sz,
2453                                               phba->params.defpdu_hdr_sz);
2454         if (ret) {
2455                 shost_printk(KERN_ERR, phba->shost,
2456                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2457                 return ret;
2458         }
2459         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2460         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2461                  phwi_context->be_def_hdrq.id);
2462         hwi_post_async_buffers(phba, 1);
2463         return 0;
2464 }
2465
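     /*
      * Create the default PDU data ring from the HWI_MEM_ASYNC_DATA_RING
      * region and post the async data buffers once the firmware has
      * created the queue.
      */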
2466 static int
2467 beiscsi_create_def_data(struct beiscsi_hba *phba,
2468                         struct hwi_context_memory *phwi_context,
2469                         struct hwi_controller *phwi_ctrlr,
2470                         unsigned int def_pdu_ring_sz)
2471 {
2472         unsigned int idx;
2473         int ret;
2474         struct be_queue_info *dataq, *cq;
2475         struct be_dma_mem *mem;
2476         struct be_mem_descriptor *mem_descr;
2477         void *dq_vaddress;
2478
2479         idx = 0;
2480         dataq = &phwi_context->be_def_dataq;
2481         cq = &phwi_context->be_cq[0];
2482         mem = &dataq->dma_mem;
2483         mem_descr = phba->init_mem;
2484         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2485         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2486         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2487                             sizeof(struct phys_addr),
2488                             sizeof(struct phys_addr), dq_vaddress);
2489         if (ret) {
2490                 shost_printk(KERN_ERR, phba->shost,
2491                              "be_fill_queue Failed for DEF PDU DATA\n");
2492                 return ret;
2493         }
2494         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2495         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2496                                               def_pdu_ring_sz,
2497                                               phba->params.defpdu_data_sz);
2498         if (ret) {
2499                 shost_printk(KERN_ERR, phba->shost,
2500                              "be_cmd_create_default_pdu_queue Failed"
2501                              " for DEF PDU DATA\n");
2502                 return ret;
2503         }
2504         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2505         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2506                  phwi_context->be_def_dataq.id);
2507         hwi_post_async_buffers(phba, 0);
2508         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2509         return 0;
2510 }
2511
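     /*
      * Post the SGE pages reserved in HWI_MEM_SGE to the firmware, one
      * mem_array element at a time, starting at the page offset that
      * corresponds to the firmware's iscsi_icd_start.
      */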
2512 static int
2513 beiscsi_post_pages(struct beiscsi_hba *phba)
2514 {
2515         struct be_mem_descriptor *mem_descr;
2516         struct mem_array *pm_arr;
2517         unsigned int page_offset, i;
2518         struct be_dma_mem sgl;
2519         int status;
2520
2521         mem_descr = phba->init_mem;
2522         mem_descr += HWI_MEM_SGE;
2523         pm_arr = mem_descr->mem_array;
2524
2525         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2526                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2527         for (i = 0; i < mem_descr->num_elements; i++) {
2528                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2529                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2530                                                 page_offset,
2531                                                 (pm_arr->size / PAGE_SIZE));
2532                 page_offset += pm_arr->size / PAGE_SIZE;
2533                 if (status != 0) {
2534                         shost_printk(KERN_ERR, phba->shost,
2535                                      "post sgl failed.\n");
2536                         return status;
2537                 }
2538                 pm_arr++;
2539         }
2540         SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2541         return 0;
2542 }
2543
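     /* Free the DMA-coherent backing store of a queue, if it was allocated. */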
2544 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2545 {
2546         struct be_dma_mem *mem = &q->dma_mem;
2547         if (mem->va)
2548                 pci_free_consistent(phba->pcidev, mem->size,
2549                         mem->va, mem->dma);
2550 }
2551
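     /* Allocate and zero DMA-coherent memory for a queue of len entries. */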
2552 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2553                 u16 len, u16 entry_size)
2554 {
2555         struct be_dma_mem *mem = &q->dma_mem;
2556
2557         memset(q, 0, sizeof(*q));
2558         q->len = len;
2559         q->entry_size = entry_size;
2560         mem->size = len * entry_size;
2561         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2562         if (!mem->va)
2563                 return -ENOMEM;
2564         memset(mem->va, 0, mem->size);
2565         return 0;
2566 }
2567
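     /*
      * Carve the HWI_MEM_WRB region into one ring of wrbs_per_cxn WRBs per
      * connection, ask the adapter to create a WRB queue for each ring and
      * record the returned queue id as the wrb_context cid.
      */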
2568 static int
2569 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2570                          struct hwi_context_memory *phwi_context,
2571                          struct hwi_controller *phwi_ctrlr)
2572 {
2573         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2574         u64 pa_addr_lo;
2575         unsigned int idx, num, i;
2576         struct mem_array *pwrb_arr;
2577         void *wrb_vaddr;
2578         struct be_dma_mem sgl;
2579         struct be_mem_descriptor *mem_descr;
2580         int status;
2581
2582         idx = 0;
2583         mem_descr = phba->init_mem;
2584         mem_descr += HWI_MEM_WRB;
2585         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2586                            GFP_KERNEL);
2587         if (!pwrb_arr) {
2588                 shost_printk(KERN_ERR, phba->shost,
2589                              "Memory alloc failed in create wrb ring.\n");
2590                 return -ENOMEM;
2591         }
2592         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2593         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2594         num_wrb_rings = mem_descr->mem_array[idx].size /
2595                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2596
2597         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2598                 if (num_wrb_rings) {
2599                         pwrb_arr[num].virtual_address = wrb_vaddr;
2600                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2601                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2602                                             sizeof(struct iscsi_wrb);
2603                         wrb_vaddr += pwrb_arr[num].size;
2604                         pa_addr_lo += pwrb_arr[num].size;
2605                         num_wrb_rings--;
2606                 } else {
2607                         idx++;
2608                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2609                         pa_addr_lo = mem_descr->mem_array[idx].
2610                                         bus_address.u.a64.address;
2611                         num_wrb_rings = mem_descr->mem_array[idx].size /
2612                                         (phba->params.wrbs_per_cxn *
2613                                         sizeof(struct iscsi_wrb));
2614                         pwrb_arr[num].virtual_address = wrb_vaddr;
2615                         pwrb_arr[num].bus_address.u.a64.address =
2616                                                 pa_addr_lo;
2617                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2618                                                  sizeof(struct iscsi_wrb);
2619                         wrb_vaddr += pwrb_arr[num].size;
2620                         pa_addr_lo   += pwrb_arr[num].size;
2621                         num_wrb_rings--;
2622                 }
2623         }
2624         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2625                 wrb_mem_index = 0;
2626                 offset = 0;
2627                 size = 0;
2628
2629                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2630                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2631                                             &phwi_context->be_wrbq[i]);
2632                 if (status != 0) {
2633                         shost_printk(KERN_ERR, phba->shost,
2634                                      "wrbq create failed.\n");
                             kfree(pwrb_arr);
2635                         return status;
2636                 }
2637                 phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id;
2638         }
2639         kfree(pwrb_arr);
2640         return 0;
2641 }
2642
2643 static void free_wrb_handles(struct beiscsi_hba *phba)
2644 {
2645         unsigned int index;
2646         struct hwi_controller *phwi_ctrlr;
2647         struct hwi_wrb_context *pwrb_context;
2648
2649         phwi_ctrlr = phba->phwi_ctrlr;
2650         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2651                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2652                 kfree(pwrb_context->pwrb_handle_base);
2653                 kfree(pwrb_context->pwrb_handle_basestd);
2654         }
2655 }
2656
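     /* Destroy the MCC queue and its completion queue and free their memory. */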
2657 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2658 {
2659         struct be_queue_info *q;
2660         struct be_ctrl_info *ctrl = &phba->ctrl;
2661
2662         q = &phba->ctrl.mcc_obj.q;
2663         if (q->created)
2664                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2665         be_queue_free(phba, q);
2666
2667         q = &phba->ctrl.mcc_obj.cq;
2668         if (q->created)
2669                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2670         be_queue_free(phba, q);
2671 }
2672
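     /*
      * Tear down everything hwi_init_port set up: WRB queues, default PDU
      * rings, posted SGLs, completion queues, event queues and finally the
      * MCC queues.
      */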
2673 static void hwi_cleanup(struct beiscsi_hba *phba)
2674 {
2675         struct be_queue_info *q;
2676         struct be_ctrl_info *ctrl = &phba->ctrl;
2677         struct hwi_controller *phwi_ctrlr;
2678         struct hwi_context_memory *phwi_context;
2679         int i, eq_num;
2680
2681         phwi_ctrlr = phba->phwi_ctrlr;
2682         phwi_context = phwi_ctrlr->phwi_ctxt;
2683         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2684                 q = &phwi_context->be_wrbq[i];
2685                 if (q->created)
2686                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2687         }
2688         free_wrb_handles(phba);
2689
2690         q = &phwi_context->be_def_hdrq;
2691         if (q->created)
2692                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2693
2694         q = &phwi_context->be_def_dataq;
2695         if (q->created)
2696                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2697
2698         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2699
2700         for (i = 0; i < (phba->num_cpus); i++) {
2701                 q = &phwi_context->be_cq[i];
2702                 if (q->created)
2703                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2704         }
2705         if (phba->msix_enabled)
2706                 eq_num = 1;
2707         else
2708                 eq_num = 0;
2709         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2710                 q = &phwi_context->be_eq[i].q;
2711                 if (q->created)
2712                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2713         }
2714         be_mcc_queues_destroy(phba);
2715 }
2716
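     /*
      * Allocate and create the MCC queue pair. The MCC completion queue is
      * bound to the extra event queue when MSIx is enabled, otherwise to
      * event queue 0.
      */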
2717 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2718                                 struct hwi_context_memory *phwi_context)
2719 {
2720         struct be_queue_info *q, *cq;
2721         struct be_ctrl_info *ctrl = &phba->ctrl;
2722
2723         /* Alloc MCC compl queue */
2724         cq = &phba->ctrl.mcc_obj.cq;
2725         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2726                         sizeof(struct be_mcc_compl)))
2727                 goto err;
2728         /* Ask BE to create MCC compl queue; */
2729         if (phba->msix_enabled) {
2730                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2731                                          [phba->num_cpus].q, false, true, 0))
2732                         goto mcc_cq_free;
2733         } else {
2734                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2735                                           false, true, 0))
2736                         goto mcc_cq_free;
2737         }
2738
2739         /* Alloc MCC queue */
2740         q = &phba->ctrl.mcc_obj.q;
2741         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2742                 goto mcc_cq_destroy;
2743
2744         /* Ask BE to create MCC queue */
2745         if (beiscsi_cmd_mccq_create(phba, q, cq))
2746                 goto mcc_q_free;
2747
2748         return 0;
2749
2750 mcc_q_free:
2751         be_queue_free(phba, q);
2752 mcc_cq_destroy:
2753         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2754 mcc_cq_free:
2755         be_queue_free(phba, cq);
2756 err:
2757         return -1;
2758 }
2759
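     /*
      * Number of EQ/CQ pairs to use: one per online CPU, capped at
      * MAX_CPUS - 1 so that one entry is left for the MCC event queue.
      */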
2760 static int find_num_cpus(void)
2761 {
2762         int  num_cpus = 0;
2763
2764         num_cpus = num_online_cpus();
2765         if (num_cpus >= MAX_CPUS)
2766                 num_cpus = MAX_CPUS - 1;
2767
2768         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2769         return num_cpus;
2770 }
2771
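     /*
      * Bring up the adapter rings in order: event queues, MCC queues,
      * firmware checks, completion queues, default PDU header and data
      * rings, SGL pages and finally the per-connection WRB rings.
      */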
2772 static int hwi_init_port(struct beiscsi_hba *phba)
2773 {
2774         struct hwi_controller *phwi_ctrlr;
2775         struct hwi_context_memory *phwi_context;
2776         unsigned int def_pdu_ring_sz;
2777         struct be_ctrl_info *ctrl = &phba->ctrl;
2778         int status;
2779
2780         def_pdu_ring_sz =
2781                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2782         phwi_ctrlr = phba->phwi_ctrlr;
2783         phwi_context = phwi_ctrlr->phwi_ctxt;
2784         phwi_context->max_eqd = 0;
2785         phwi_context->min_eqd = 0;
2786         phwi_context->cur_eqd = 64;
2787         be_cmd_fw_initialize(&phba->ctrl);
2788
2789         status = beiscsi_create_eqs(phba, phwi_context);
2790         if (status != 0) {
2791                 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2792                 goto error;
2793         }
2794
2795         status = be_mcc_queues_create(phba, phwi_context);
2796         if (status != 0)
2797                 goto error;
2798
2799         status = mgmt_check_supported_fw(ctrl, phba);
2800         if (status != 0) {
2801                 shost_printk(KERN_ERR, phba->shost,
2802                              "Unsupported fw version \n");
2803                 goto error;
2804         }
2805
2806         if (phba->fw_config.iscsi_features == 0x1)
2807                 ring_mode = 1;
2808         else
2809                 ring_mode = 0;
2810         status = mgmt_get_fw_config(ctrl, phba);
2811         if (status != 0) {
2812                 shost_printk(KERN_ERR, phba->shost,
2813                              "Error getting fw config\n");
2814                 goto error;
2815         }
2816
2817         status = beiscsi_create_cqs(phba, phwi_context);
2818         if (status != 0) {
2819                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2820                 goto error;
2821         }
2822
2823         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2824                                         def_pdu_ring_sz);
2825         if (status != 0) {
2826                 shost_printk(KERN_ERR, phba->shost,
2827                              "Default Header not created\n");
2828                 goto error;
2829         }
2830
2831         status = beiscsi_create_def_data(phba, phwi_context,
2832                                          phwi_ctrlr, def_pdu_ring_sz);
2833         if (status != 0) {
2834                 shost_printk(KERN_ERR, phba->shost,
2835                              "Default Data not created\n");
2836                 goto error;
2837         }
2838
2839         status = beiscsi_post_pages(phba);
2840         if (status != 0) {
2841                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2842                 goto error;
2843         }
2844
2845         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2846         if (status != 0) {
2847                 shost_printk(KERN_ERR, phba->shost,
2848                              "WRB Rings not created\n");
2849                 goto error;
2850         }
2851
2852         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2853         return 0;
2854
2855 error:
2856         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2857         hwi_cleanup(phba);
2858         return -ENOMEM;
2859 }
2860
2861 static int hwi_init_controller(struct beiscsi_hba *phba)
2862 {
2863         struct hwi_controller *phwi_ctrlr;
2864
2865         phwi_ctrlr = phba->phwi_ctrlr;
2866         if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2867                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2868                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2869                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2870                          phwi_ctrlr->phwi_ctxt);
2871         } else {
2872                 shost_printk(KERN_ERR, phba->shost,
2873                              "HWI_MEM_ADDN_CONTEXT is more than one element."
2874                              " Failing to load\n");
2875                 return -ENOMEM;
2876         }
2877
2878         iscsi_init_global_templates(phba);
2879         beiscsi_init_wrb_handle(phba);
2880         hwi_init_async_pdu_ctx(phba);
2881         if (hwi_init_port(phba) != 0) {
2882                 shost_printk(KERN_ERR, phba->shost,
2883                              "hwi_init_controller failed\n");
2884                 return -ENOMEM;
2885         }
2886         return 0;
2887 }
2888
2889 static void beiscsi_free_mem(struct beiscsi_hba *phba)
2890 {
2891         struct be_mem_descriptor *mem_descr;
2892         int i, j;
2893
2894         mem_descr = phba->init_mem;
2895         i = 0;
2896         j = 0;
2897         for (i = 0; i < SE_MEM_MAX; i++) {
2898                 for (j = mem_descr->num_elements; j > 0; j--) {
2899                         pci_free_consistent(phba->pcidev,
2900                           mem_descr->mem_array[j - 1].size,
2901                           mem_descr->mem_array[j - 1].virtual_address,
2902                           mem_descr->mem_array[j - 1].bus_address.
2903                                 u.a64.address);
2904                 }
2905                 kfree(mem_descr->mem_array);
2906                 mem_descr++;
2907         }
2908         kfree(phba->init_mem);
2909         kfree(phba->phwi_ctrlr);
2910 }
2911
2912 static int beiscsi_init_controller(struct beiscsi_hba *phba)
2913 {
2914         int ret = -ENOMEM;
2915
2916         ret = beiscsi_get_memory(phba);
2917         if (ret < 0) {
2918                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
2919                              " Failed in beiscsi_get_memory\n");
2920                 return ret;
2921         }
2922
2923         ret = hwi_init_controller(phba);
2924         if (ret)
2925                 goto free_init;
2926         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
2927         return 0;
2928
2929 free_init:
2930         beiscsi_free_mem(phba);
2931         return -ENOMEM;
2932 }
2933
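     /*
      * Build the io and eh SGL handle tables from the HWI_MEM_SGLH region
      * and attach to each handle its iscsi_sge fragment list from
      * HWI_MEM_SGE; sgl_index values start at the firmware's
      * iscsi_cid_start.
      */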
2934 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2935 {
2936         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2937         struct sgl_handle *psgl_handle;
2938         struct iscsi_sge *pfrag;
2939         unsigned int arr_index, i, idx;
2940
2941         phba->io_sgl_hndl_avbl = 0;
2942         phba->eh_sgl_hndl_avbl = 0;
2943
2944         if (ring_mode) {
2945                 phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
2946                                               phba->params.icds_per_ctrl,
2947                                                  GFP_KERNEL);
2948                 if (!phba->sgl_hndl_array) {
2949                         shost_printk(KERN_ERR, phba->shost,
2950                                      "Mem Alloc Failed. Failing to load\n");
2951                         return -ENOMEM;
2952                 }
2953         }
2954
2955         mem_descr_sglh = phba->init_mem;
2956         mem_descr_sglh += HWI_MEM_SGLH;
2957         if (1 == mem_descr_sglh->num_elements) {
2958                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2959                                                  phba->params.ios_per_ctrl,
2960                                                  GFP_KERNEL);
2961                 if (!phba->io_sgl_hndl_base) {
2962                         if (ring_mode)
2963                                 kfree(phba->sgl_hndl_array);
2964                         shost_printk(KERN_ERR, phba->shost,
2965                                      "Mem Alloc Failed. Failing to load\n");
2966                         return -ENOMEM;
2967                 }
2968                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2969                                                  (phba->params.icds_per_ctrl -
2970                                                  phba->params.ios_per_ctrl),
2971                                                  GFP_KERNEL);
2972                 if (!phba->eh_sgl_hndl_base) {
2973                         kfree(phba->io_sgl_hndl_base);
2974                         shost_printk(KERN_ERR, phba->shost,
2975                                      "Mem Alloc Failed. Failing to load\n");
2976                         return -ENOMEM;
2977                 }
2978         } else {
2979                 shost_printk(KERN_ERR, phba->shost,
2980                              "HWI_MEM_SGLH is more than one element."
2981                              " Failing to load\n");
2982                 return -ENOMEM;
2983         }
2984
2985         arr_index = 0;
2986         idx = 0;
2987         while (idx < mem_descr_sglh->num_elements) {
2988                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
2989
2990                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
2991                       sizeof(struct sgl_handle)); i++) {
2992                         if (arr_index < phba->params.ios_per_ctrl) {
2993                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
2994                                 phba->io_sgl_hndl_avbl++;
2995                                 arr_index++;
2996                         } else {
2997                                 phba->eh_sgl_hndl_base[arr_index -
2998                                         phba->params.ios_per_ctrl] =
2999                                                                 psgl_handle;
3000                                 arr_index++;
3001                                 phba->eh_sgl_hndl_avbl++;
3002                         }
3003                         psgl_handle++;
3004                 }
3005                 idx++;
3006         }
3007         SE_DEBUG(DBG_LVL_8,
3008                  "phba->io_sgl_hndl_avbl=%d"
3009                  " phba->eh_sgl_hndl_avbl=%d\n",
3010                  phba->io_sgl_hndl_avbl,
3011                  phba->eh_sgl_hndl_avbl);
3012         mem_descr_sg = phba->init_mem;
3013         mem_descr_sg += HWI_MEM_SGE;
3014         SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
3015                  mem_descr_sg->num_elements);
3016         arr_index = 0;
3017         idx = 0;
3018         while (idx < mem_descr_sg->num_elements) {
3019                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3020
3021                 for (i = 0;
3022                      i < (mem_descr_sg->mem_array[idx].size) /
3023                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3024                      i++) {
3025                         if (arr_index < phba->params.ios_per_ctrl)
3026                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3027                         else
3028                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3029                                                 phba->params.ios_per_ctrl];
3030                         psgl_handle->pfrag = pfrag;
3031                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3032                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3033                         pfrag += phba->params.num_sge_per_io;
3034                         psgl_handle->sgl_index =
3035                                 phba->fw_config.iscsi_cid_start + arr_index++;
3036                 }
3037                 idx++;
3038         }
3039         phba->io_sgl_free_index = 0;
3040         phba->io_sgl_alloc_index = 0;
3041         phba->eh_sgl_free_index = 0;
3042         phba->eh_sgl_alloc_index = 0;
3043         return 0;
3044 }
3045
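     /*
      * Allocate the free-cid array (cids handed out in steps of two from
      * the firmware-reported base) and the endpoint lookup table.
      */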
3046 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3047 {
3048         int i, new_cid;
3049
3050         phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3051                                   GFP_KERNEL);
3052         if (!phba->cid_array) {
3053                 shost_printk(KERN_ERR, phba->shost,
3054                              "Failed to allocate memory in "
3055                              "hba_setup_cid_tbls\n");
3056                 return -ENOMEM;
3057         }
3058         phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) *
3059                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3060         if (!phba->ep_array) {
3061                 shost_printk(KERN_ERR, phba->shost,
3062                              "Failed to allocate memory in "
3063                              "hba_setup_cid_tbls \n");
3064                 kfree(phba->cid_array);
3065                 return -ENOMEM;
3066         }
3067         new_cid = phba->fw_config.iscsi_icd_start;
3068         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3069                 phba->cid_array[i] = new_cid;
3070                 new_cid += 2;
3071         }
3072         phba->avlbl_cids = phba->params.cxns_per_ctrl;
3073         return 0;
3074 }
3075
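     /*
      * Set the host interrupt enable bit in the membar control register,
      * if not already set, and ring every event queue doorbell to arm it.
      */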
3076 static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3077 {
3078         struct be_ctrl_info *ctrl = &phba->ctrl;
3079         struct hwi_controller *phwi_ctrlr;
3080         struct hwi_context_memory *phwi_context;
3081         struct be_queue_info *eq;
3082         u8 __iomem *addr;
3083         u32 reg, i;
3084         u32 enabled;
3085
3086         phwi_ctrlr = phba->phwi_ctrlr;
3087         phwi_context = phwi_ctrlr->phwi_ctxt;
3088
3089         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3090                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3091         reg = ioread32(addr);
3092         SE_DEBUG(DBG_LVL_8, "reg = 0x%08x\n", reg);
3093
3094         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3095         if (!enabled) {
3096                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3097                 SE_DEBUG(DBG_LVL_8, "reg = 0x%08x addr=%p\n", reg, addr);
3098                 iowrite32(reg, addr);
3099                 for (i = 0; i <= phba->num_cpus; i++) {
3100                         eq = &phwi_context->be_eq[i].q;
3101                         SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3102                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3103                 }
3104         } else
3105                 shost_printk(KERN_WARNING, phba->shost,
3106                              "In hwi_enable_intr, Not Enabled \n");
3107         return true;
3108 }
3109
3110 static void hwi_disable_intr(struct beiscsi_hba *phba)
3111 {
3112         struct be_ctrl_info *ctrl = &phba->ctrl;
3113
3114         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3115         u32 reg = ioread32(addr);
3116
3117         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3118         if (enabled) {
3119                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3120                 iowrite32(reg, addr);
3121         } else
3122                 shost_printk(KERN_WARNING, phba->shost,
3123                              "In hwi_disable_intr, Already Disabled \n");
3124 }
3125
3126 static int beiscsi_init_port(struct beiscsi_hba *phba)
3127 {
3128         int ret;
3129
3130         ret = beiscsi_init_controller(phba);
3131         if (ret < 0) {
3132                 shost_printk(KERN_ERR, phba->shost,
3133                              "beiscsi_dev_probe - Failed in"
3134                              " beiscsi_init_controller\n");
3135                 return ret;
3136         }
3137         ret = beiscsi_init_sgl_handle(phba);
3138         if (ret < 0) {
3139                 shost_printk(KERN_ERR, phba->shost,
3140                              "beiscsi_dev_probe - Failed in"
3141                              " beiscsi_init_sgl_handle\n");
3142                 goto do_cleanup_ctrlr;
3143         }
3144
3145         if (hba_setup_cid_tbls(phba)) {
3146                 shost_printk(KERN_ERR, phba->shost,
3147                              "Failed in hba_setup_cid_tbls\n");
3148                 if (ring_mode)
3149                         kfree(phba->sgl_hndl_array);
3150                 kfree(phba->io_sgl_hndl_base);
3151                 kfree(phba->eh_sgl_hndl_base);
3152                 goto do_cleanup_ctrlr;
3153         }
3154
3155         return ret;
3156
3157 do_cleanup_ctrlr:
3158         hwi_cleanup(phba);
3159         return ret;
3160 }
3161
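     /* Walk every event queue and consume any valid entries left behind. */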
3162 static void hwi_purge_eq(struct beiscsi_hba *phba)
3163 {
3164         struct hwi_controller *phwi_ctrlr;
3165         struct hwi_context_memory *phwi_context;
3166         struct be_queue_info *eq;
3167         struct be_eq_entry *eqe = NULL;
3168         int i, eq_msix;
3169
3170         phwi_ctrlr = phba->phwi_ctrlr;
3171         phwi_context = phwi_ctrlr->phwi_ctxt;
3172         if (phba->msix_enabled)
3173                 eq_msix = 1;
3174         else
3175                 eq_msix = 0;
3176
3177         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3178                 eq = &phwi_context->be_eq[i].q;
3179                 eqe = queue_tail_node(eq);
3180
3181                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3182                                         & EQE_VALID_MASK) {
3183                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3184                         queue_tail_inc(eq);
3185                         eqe = queue_tail_node(eq);
3186                 }
3187         }
3188 }
3189
3190 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3191 {
3192         unsigned char mgmt_status;
3193
3194         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3195         if (mgmt_status)
3196                 shost_printk(KERN_WARNING, phba->shost,
3197                              "mgmt_epfw_cleanup FAILED \n");
3198         hwi_cleanup(phba);
3199         hwi_purge_eq(phba);
3200         if (ring_mode)
3201                 kfree(phba->sgl_hndl_array);
3202         kfree(phba->io_sgl_hndl_base);
3203         kfree(phba->eh_sgl_hndl_base);
3204         kfree(phba->cid_array);
3205         kfree(phba->ep_array);
3206 }
3207
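     /*
      * Push the negotiated login parameters to the adapter by posting a
      * target context update WRB on the connection's WRB ring.
      */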
3208 void
3209 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3210                            struct beiscsi_offload_params *params)
3211 {
3212         struct wrb_handle *pwrb_handle;
3213         struct iscsi_target_context_update_wrb *pwrb = NULL;
3214         struct be_mem_descriptor *mem_descr;
3215         struct beiscsi_hba *phba = beiscsi_conn->phba;
3216         u32 doorbell = 0;
3217
3218         /*
3219          * We can always use 0 here because it is reserved by libiscsi for
3220          * login/startup related tasks.
3221          */
3222         pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0);
3223         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3224         memset(pwrb, 0, sizeof(*pwrb));
3225         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3226                       max_burst_length, pwrb, params->dw[offsetof
3227                       (struct amap_beiscsi_offload_params,
3228                       max_burst_length) / 32]);
3229         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3230                       max_send_data_segment_length, pwrb,
3231                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3232                       max_send_data_segment_length) / 32]);
3233         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3234                       first_burst_length,
3235                       pwrb,
3236                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3237                       first_burst_length) / 32]);
3238
3239         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3240                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3241                       erl) / 32] & OFFLD_PARAMS_ERL));
3242         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3243                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3244                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3245         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3246                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3247                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3248         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3249                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3250                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3251         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3252                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3253                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3254         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3255                       pwrb,
3256                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3257                       exp_statsn) / 32] + 1));
3258         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3259                       0x7);
3260         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3261                       pwrb, pwrb_handle->wrb_index);
3262         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3263                       pwrb, pwrb_handle->nxt_wrb_index);
3264         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3265                         session_state, pwrb, 0);
3266         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3267                       pwrb, 1);
3268         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3269                       pwrb, 0);
3270         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3271                       0);
3272
3273         mem_descr = phba->init_mem;
3274         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3275
3276         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3277                         pad_buffer_addr_hi, pwrb,
3278                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3279         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3280                         pad_buffer_addr_lo, pwrb,
3281                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3282
3283         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3284
3285         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3286         if (!ring_mode)
3287                 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3288                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3289         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3290
3291         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3292 }
3293
3294 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3295                               int *index, int *age)
3296 {
3297         *index = (int)itt;
3298         if (age)
3299                 *age = conn->session->age;
3300 }
3301
3302 /**
3303  * beiscsi_alloc_pdu - allocates pdu and related resources
3304  * @task: libiscsi task
3305  * @opcode: opcode of pdu for task
3306  *
3307  * This is called with the session lock held. It will allocate
3308  * the wrb and sgl if needed for the command. And it will prep
3309  * the pdu's itt. beiscsi_parse_pdu will later translate
3310  * the pdu itt to the libiscsi task itt.
3311  */
3312 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3313 {
3314         struct beiscsi_io_task *io_task = task->dd_data;
3315         struct iscsi_conn *conn = task->conn;
3316         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3317         struct beiscsi_hba *phba = beiscsi_conn->phba;
3318         struct hwi_wrb_context *pwrb_context;
3319         struct hwi_controller *phwi_ctrlr;
3320         itt_t itt;
3321         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3322         dma_addr_t paddr;
3323
3324         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3325                                           GFP_KERNEL, &paddr);
3326         if (!io_task->cmd_bhs)
3327                 return -ENOMEM;
3328         io_task->bhs_pa.u.a64.address = paddr;
3329         io_task->libiscsi_itt = (itt_t)task->itt;
3330         io_task->pwrb_handle = alloc_wrb_handle(phba,
3331                                                 beiscsi_conn->beiscsi_conn_cid,
3332                                                 task->itt);
3333         io_task->conn = beiscsi_conn;
3334
3335         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3336         task->hdr_max = sizeof(struct be_cmd_bhs);
3337
3338         if (task->sc) {
3339                 spin_lock(&phba->io_sgl_lock);
3340                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3341                 spin_unlock(&phba->io_sgl_lock);
3342                 if (!io_task->psgl_handle)
3343                         goto free_hndls;
3344         } else {
3345                 io_task->scsi_cmnd = NULL;
3346                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3347                         if (!beiscsi_conn->login_in_progress) {
3348                                 spin_lock(&phba->mgmt_sgl_lock);
3349                                 io_task->psgl_handle = (struct sgl_handle *)
3350                                                 alloc_mgmt_sgl_handle(phba);
3351                                 spin_unlock(&phba->mgmt_sgl_lock);
3352                                 if (!io_task->psgl_handle)
3353                                         goto free_hndls;
3354
3355                                 beiscsi_conn->login_in_progress = 1;
3356                                 beiscsi_conn->plogin_sgl_handle =
3357                                                         io_task->psgl_handle;
3358                         } else {
3359                                 io_task->psgl_handle =
3360                                                 beiscsi_conn->plogin_sgl_handle;
3361                         }
3362                 } else {
3363                         spin_lock(&phba->mgmt_sgl_lock);
3364                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3365                         spin_unlock(&phba->mgmt_sgl_lock);
3366                         if (!io_task->psgl_handle)
3367                                 goto free_hndls;
3368                 }
3369         }
3370         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3371                                  wrb_index << 16) | (unsigned int)
3372                                 (io_task->psgl_handle->sgl_index));
3373         if (ring_mode) {
3374                 phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
3375                                      phba->fw_config.iscsi_cid_start] =
3376                                      io_task->psgl_handle;
3377                 io_task->psgl_handle->task = task;
3378                 io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid;
3379         } else
3380                 io_task->pwrb_handle->pio_handle = task;
3381
3382         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3383         return 0;
3384
3385 free_hndls:
3386         phwi_ctrlr = phba->phwi_ctrlr;
3387         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
3388         free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3389         io_task->pwrb_handle = NULL;
3390         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3391                       io_task->bhs_pa.u.a64.address);
3392         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3393         return -ENOMEM;
3394 }
3395
3396 static void beiscsi_cleanup_task(struct iscsi_task *task)
3397 {
3398         struct beiscsi_io_task *io_task = task->dd_data;
3399         struct iscsi_conn *conn = task->conn;
3400         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3401         struct beiscsi_hba *phba = beiscsi_conn->phba;
3402         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3403         struct hwi_wrb_context *pwrb_context;
3404         struct hwi_controller *phwi_ctrlr;
3405
3406         phwi_ctrlr = phba->phwi_ctrlr;
3407         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
3408         if (io_task->pwrb_handle) {
3409                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3410                 io_task->pwrb_handle = NULL;
3411         }
3412
3413         if (io_task->cmd_bhs) {
3414                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3415                               io_task->bhs_pa.u.a64.address);
3416         }
3417
3418         if (task->sc) {
3419                 if (io_task->psgl_handle) {
3420                         spin_lock(&phba->io_sgl_lock);
3421                         free_io_sgl_handle(phba, io_task->psgl_handle);
3422                         spin_unlock(&phba->io_sgl_lock);
3423                         io_task->psgl_handle = NULL;
3424                 }
3425         } else {
3426                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3427                         return;
3428                 if (io_task->psgl_handle) {
3429                         spin_lock(&phba->mgmt_sgl_lock);
3430                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3431                         spin_unlock(&phba->mgmt_sgl_lock);
3432                         io_task->psgl_handle = NULL;
3433                 }
3434         }
3435 }
3436
3437 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3438                           unsigned int num_sg, unsigned int xferlen,
3439                           unsigned int writedir)
3440 {
3441
3442         struct beiscsi_io_task *io_task = task->dd_data;
3443         struct iscsi_conn *conn = task->conn;
3444         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3445         struct beiscsi_hba *phba = beiscsi_conn->phba;
3446         struct iscsi_wrb *pwrb = NULL;
3447         unsigned int doorbell = 0;
3448
3449         pwrb = io_task->pwrb_handle->pwrb;
3450         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3451         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3452
3453         if (writedir) {
3454                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3455                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3456                               &io_task->cmd_bhs->iscsi_data_pdu,
3457                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3458                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3459                               &io_task->cmd_bhs->iscsi_data_pdu,
3460                               ISCSI_OPCODE_SCSI_DATA_OUT);
3461                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3462                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3463                 if (ring_mode)
3464                         io_task->psgl_handle->type = INI_WR_CMD;
3465                 else
3466                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3467                                       INI_WR_CMD);
3468                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3469         } else {
3470                 if (ring_mode)
3471                         io_task->psgl_handle->type = INI_RD_CMD;
3472                 else
3473                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3474                                       INI_RD_CMD);
3475                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3476         }
3477         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3478                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3479                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3480
3481         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3482                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3483                                   lun[0]));
3484         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3485         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3486                       io_task->pwrb_handle->wrb_index);
3487         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3488                       be32_to_cpu(task->cmdsn));
3489         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3490                       io_task->psgl_handle->sgl_index);
3491
3492         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3493
3494         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3495                       io_task->pwrb_handle->nxt_wrb_index);
3496         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3497
3498         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3499         if (!ring_mode)
3500                 doorbell |= (io_task->pwrb_handle->wrb_index &
3501                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3502         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3503
3504         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3505         return 0;
3506 }
3507
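     /*
      * Transmit a management-path task (login, nop-out, text, TMF, logout)
      * by building the matching WRB and ringing the TX doorbell. For a TMF
      * the ICDs of the task being aborted are invalidated first.
      */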
3508 static int beiscsi_mtask(struct iscsi_task *task)
3509 {
3510         struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
3511         struct iscsi_conn *conn = task->conn;
3512         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3513         struct beiscsi_hba *phba = beiscsi_conn->phba;
3514         struct iscsi_session *session;
3515         struct iscsi_wrb *pwrb = NULL;
3516         struct hwi_controller *phwi_ctrlr;
3517         struct hwi_wrb_context *pwrb_context;
3518         struct wrb_handle *pwrb_handle;
3519         unsigned int doorbell = 0;
3520         unsigned int i, cid;
3521         struct iscsi_task *aborted_task;
3522
3523         cid = beiscsi_conn->beiscsi_conn_cid;
3524         pwrb = io_task->pwrb_handle->pwrb;
3525         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3526                       be32_to_cpu(task->cmdsn));
3527         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3528                       io_task->pwrb_handle->wrb_index);
3529         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3530                       io_task->psgl_handle->sgl_index);
3531
3532         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3533         case ISCSI_OP_LOGIN:
3534                 if (ring_mode)
3535                         io_task->psgl_handle->type = TGT_DM_CMD;
3536                 else
3537                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3538                                       TGT_DM_CMD);
3539                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3540                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3541                 hwi_write_buffer(pwrb, task);
3542                 break;
3543         case ISCSI_OP_NOOP_OUT:
3544                 if (ring_mode)
3545                         io_task->psgl_handle->type = INI_RD_CMD;
3546                 else
3547                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3548                                       INI_RD_CMD);
3549                 hwi_write_buffer(pwrb, task);
3550                 break;
3551         case ISCSI_OP_TEXT:
3552                 if (ring_mode)
3553                         io_task->psgl_handle->type = INI_WR_CMD;
3554                 else
3555                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3556                                       INI_WR_CMD);
3557                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3558                 hwi_write_buffer(pwrb, task);
3559                 break;
3560         case ISCSI_OP_SCSI_TMFUNC:
3561                 session = conn->session;
3562                 i = ((struct iscsi_tm *)task->hdr)->rtt;
3563                 phwi_ctrlr = phba->phwi_ctrlr;
3564                 pwrb_context = &phwi_ctrlr->wrb_context[cid];
3565                 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3566                                                                 >> 16];
3567                 aborted_task = pwrb_handle->pio_handle;
3568                 if (!aborted_task)
3569                         return 0;
3570
3571                 aborted_io_task = aborted_task->dd_data;
3572                 if (!aborted_io_task->scsi_cmnd)
3573                         return 0;
3574
3575                 mgmt_invalidate_icds(phba,
3576                                      aborted_io_task->psgl_handle->sgl_index,
3577                                      cid);
3578                 if (ring_mode)
3579                         io_task->psgl_handle->type = INI_TMF_CMD;
3580                 else
3581                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3582                                       INI_TMF_CMD);
3583                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3584                 hwi_write_buffer(pwrb, task);
3585                 break;
3586         case ISCSI_OP_LOGOUT:
3587                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3588                 if (ring_mode)
3589                         io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
3590                 else
3591                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3592                                 HWH_TYPE_LOGOUT);
3593                 hwi_write_buffer(pwrb, task);
3594                 break;
3595
3596         default:
3597                 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
3598                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3599                 return -EINVAL;
3600         }
3601
3602         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3603                       be32_to_cpu(task->data_count));
3604         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3605                       io_task->pwrb_handle->nxt_wrb_index);
3606         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3607
3608         doorbell |= cid & DB_WRB_POST_CID_MASK;
3609         if (!ring_mode)
3610                 doorbell |= (io_task->pwrb_handle->wrb_index &
3611                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3612         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3613         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3614         return 0;
3615 }
3616
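     /*
      * libiscsi xmit entry point: tasks without a scsi_cmnd go through
      * beiscsi_mtask, SCSI commands are DMA-mapped and handed to
      * beiscsi_iotask.
      */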
3617 static int beiscsi_task_xmit(struct iscsi_task *task)
3618 {
3619         struct iscsi_conn *conn = task->conn;
3620         struct beiscsi_io_task *io_task = task->dd_data;
3621         struct scsi_cmnd *sc = task->sc;
3622         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3623         struct scatterlist *sg;
3624         int num_sg;
3625         unsigned int  writedir = 0, xferlen = 0;
3626
3627         SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3628                  "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3629                  task, conn, beiscsi_conn);
3630         if (!sc)
3631                 return beiscsi_mtask(task);
3632
3633         io_task->scsi_cmnd = sc;
3634         num_sg = scsi_dma_map(sc);
3635         if (num_sg < 0) {
3636                 SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n");
3637                 return num_sg;
3638         }
3639         SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3640                   (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3641         xferlen = scsi_bufflen(sc);
3642         sg = scsi_sglist(sc);
3643         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3644                 writedir = 1;
3645                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3646                          task->imm_count);
3647         } else
3648                 writedir = 0;
3649         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3650 }
3651
3652
3653 static void beiscsi_remove(struct pci_dev *pcidev)
3654 {
3655         struct beiscsi_hba *phba = NULL;
3656         struct hwi_controller *phwi_ctrlr;
3657         struct hwi_context_memory *phwi_context;
3658         struct be_eq_obj *pbe_eq;
3659         unsigned int i, msix_vec;
3660
3661         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3662         if (!phba) {
3663                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
3664                 return;
3665         }
3666
3667         phwi_ctrlr = phba->phwi_ctrlr;
3668         phwi_context = phwi_ctrlr->phwi_ctxt;
3669         hwi_disable_intr(phba);
3670         if (phba->msix_enabled) {
3671                 for (i = 0; i <= phba->num_cpus; i++) {
3672                         msix_vec = phba->msix_entries[i].vector;
3673                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3674                 }
3675         } else
3676                 if (phba->pcidev->irq)
3677                         free_irq(phba->pcidev->irq, phba);
3678         pci_disable_msix(phba->pcidev);
3679         destroy_workqueue(phba->wq);
3680         if (blk_iopoll_enabled)
3681                 for (i = 0; i < phba->num_cpus; i++) {
3682                         pbe_eq = &phwi_context->be_eq[i];
3683                         blk_iopoll_disable(&pbe_eq->iopoll);
3684                 }
3685
3686         beiscsi_clean_port(phba);
3687         beiscsi_free_mem(phba);
3688         beiscsi_unmap_pci_function(phba);
3689         pci_free_consistent(phba->pcidev,
3690                             phba->ctrl.mbox_mem_alloced.size,
3691                             phba->ctrl.mbox_mem_alloced.va,
3692                             phba->ctrl.mbox_mem_alloced.dma);
3693         iscsi_host_remove(phba->shost);
3694         pci_dev_put(phba->pcidev);
3695         iscsi_host_free(phba->shost);
3696 }
3697
3698 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3699 {
3700         int i, status;
3701
3702         for (i = 0; i <= phba->num_cpus; i++)
3703                 phba->msix_entries[i].entry = i;
3704
3705         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3706                                  (phba->num_cpus + 1));
3707         if (!status)
3708                 phba->msix_enabled = true;
3709
3710         return;
3711 }
3712
3713 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3714                                 const struct pci_device_id *id)
3715 {
3716         struct beiscsi_hba *phba = NULL;
3717         struct hwi_controller *phwi_ctrlr;
3718         struct hwi_context_memory *phwi_context;
3719         struct be_eq_obj *pbe_eq;
3720         int ret, msix_vec, num_cpus, i;
3721
3722         ret = beiscsi_enable_pci(pcidev);
3723         if (ret < 0) {
3724                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3725                 dev_err(&pcidev->dev, "beiscsi_dev_probe -"
3726                         " Failed to enable pci device\n");
3727         }
3728
3729         phba = beiscsi_hba_alloc(pcidev);
3730         if (!phba) {
3731                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3732                         " Failed in beiscsi_hba_alloc \n");
3733                 goto disable_pci;
3734         }
3735         SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
3736
3737         pci_set_drvdata(pcidev, phba);
3738         if (enable_msix)
3739                 num_cpus = find_num_cpus();
3740         else
3741                 num_cpus = 1;
3742         phba->num_cpus = num_cpus;
3743         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3744
3745         if (enable_msix)
3746                 beiscsi_msix_enable(phba);
3747         ret = be_ctrl_init(phba, pcidev);
3748         if (ret) {
3749                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3750                                 "Failed in be_ctrl_init\n");
3751                 goto hba_free;
3752         }
3753
3754         spin_lock_init(&phba->io_sgl_lock);
3755         spin_lock_init(&phba->mgmt_sgl_lock);
3756         spin_lock_init(&phba->isr_lock);
3757         beiscsi_get_params(phba);
3758         ret = beiscsi_init_port(phba);
3759         if (ret < 0) {
3760                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3761                              "Failed in beiscsi_init_port\n");
3762                 goto free_port;
3763         }
3764
3765         snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3766                  phba->shost->host_no);
3767         phba->wq = create_workqueue(phba->wq_name);
3768         if (!phba->wq) {
3769                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3770                                 "Failed to allocate work queue\n");
3771                 goto free_twq;
3772         }
3773
3774         INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3775
3776         phwi_ctrlr = phba->phwi_ctrlr;
3777         phwi_context = phwi_ctrlr->phwi_ctxt;
3778         if (blk_iopoll_enabled) {
3779                 for (i = 0; i < phba->num_cpus; i++) {
3780                         pbe_eq = &phwi_context->be_eq[i];
3781                         blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3782                                         be_iopoll);
3783                         blk_iopoll_enable(&pbe_eq->iopoll);
3784                 }
3785         }
3786         ret = beiscsi_init_irqs(phba);
3787         if (ret < 0) {
3788                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3789                              "Failed to beiscsi_init_irqs\n");
3790                 goto free_blkenbld;
3791         }
3792         ret = hwi_enable_intr(phba);
3793         if (ret < 0) {
3794                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3795                              "Failed to hwi_enable_intr\n");
3796                 goto free_ctrlr;
3797         }
3798         SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3799         return 0;
3800
3801 free_ctrlr:
3802         if (phba->msix_enabled) {
3803                 for (i = 0; i <= phba->num_cpus; i++) {
3804                         msix_vec = phba->msix_entries[i].vector;
3805                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3806                 }
3807         } else
3808                 if (phba->pcidev->irq)
3809                         free_irq(phba->pcidev->irq, phba);
3810         pci_disable_msix(phba->pcidev);
3811 free_blkenbld:
3812         destroy_workqueue(phba->wq);
3813         if (blk_iopoll_enabled)
3814                 for (i = 0; i < phba->num_cpus; i++) {
3815                         pbe_eq = &phwi_context->be_eq[i];
3816                         blk_iopoll_disable(&pbe_eq->iopoll);
3817                 }
3818 free_twq:
3819         beiscsi_clean_port(phba);
3820         beiscsi_free_mem(phba);
3821 free_port:
3822         pci_free_consistent(phba->pcidev,
3823                             phba->ctrl.mbox_mem_alloced.size,
3824                             phba->ctrl.mbox_mem_alloced.va,
3825                            phba->ctrl.mbox_mem_alloced.dma);
3826         beiscsi_unmap_pci_function(phba);
3827 hba_free:
3828         iscsi_host_remove(phba->shost);
3829         pci_dev_put(phba->pcidev);
3830         iscsi_host_free(phba->shost);
3831 disable_pci:
3832         pci_disable_device(pcidev);
3833         return ret;
3834 }
3835
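/*
 * iSCSI transport template registered with the iscsi transport class.
 * Session, connection and I/O task handling use the beiscsi_* callbacks;
 * generic libiscsi helpers are used where no hardware-specific handling
 * is required.
 */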
3836 struct iscsi_transport beiscsi_iscsi_transport = {
3837         .owner = THIS_MODULE,
3838         .name = DRV_NAME,
3839         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
3840                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3841         .param_mask = ISCSI_MAX_RECV_DLENGTH |
3842                 ISCSI_MAX_XMIT_DLENGTH |
3843                 ISCSI_HDRDGST_EN |
3844                 ISCSI_DATADGST_EN |
3845                 ISCSI_INITIAL_R2T_EN |
3846                 ISCSI_MAX_R2T |
3847                 ISCSI_IMM_DATA_EN |
3848                 ISCSI_FIRST_BURST |
3849                 ISCSI_MAX_BURST |
3850                 ISCSI_PDU_INORDER_EN |
3851                 ISCSI_DATASEQ_INORDER_EN |
3852                 ISCSI_ERL |
3853                 ISCSI_CONN_PORT |
3854                 ISCSI_CONN_ADDRESS |
3855                 ISCSI_EXP_STATSN |
3856                 ISCSI_PERSISTENT_PORT |
3857                 ISCSI_PERSISTENT_ADDRESS |
3858                 ISCSI_TARGET_NAME | ISCSI_TPGT |
3859                 ISCSI_USERNAME | ISCSI_PASSWORD |
3860                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3861                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3862                 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
3863                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3864                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3865         .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3866                                 ISCSI_HOST_INITIATOR_NAME,
3867         .create_session = beiscsi_session_create,
3868         .destroy_session = beiscsi_session_destroy,
3869         .create_conn = beiscsi_conn_create,
3870         .bind_conn = beiscsi_conn_bind,
3871         .destroy_conn = iscsi_conn_teardown,
3872         .set_param = beiscsi_set_param,
3873         .get_conn_param = beiscsi_conn_get_param,
3874         .get_session_param = iscsi_session_get_param,
3875         .get_host_param = beiscsi_get_host_param,
3876         .start_conn = beiscsi_conn_start,
3877         .stop_conn = beiscsi_conn_stop,
3878         .send_pdu = iscsi_conn_send_pdu,
3879         .xmit_task = beiscsi_task_xmit,
3880         .cleanup_task = beiscsi_cleanup_task,
3881         .alloc_pdu = beiscsi_alloc_pdu,
3882         .parse_pdu_itt = beiscsi_parse_pdu,
3883         .get_stats = beiscsi_conn_get_stats,
3884         .ep_connect = beiscsi_ep_connect,
3885         .ep_poll = beiscsi_ep_poll,
3886         .ep_disconnect = beiscsi_ep_disconnect,
3887         .session_recovery_timedout = iscsi_session_recovery_timedout,
3888 };
3889
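/* PCI driver glue binding the probe/remove callbacks to the device table. */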
3890 static struct pci_driver beiscsi_pci_driver = {
3891         .name = DRV_NAME,
3892         .probe = beiscsi_dev_probe,
3893         .remove = beiscsi_remove,
3894         .id_table = beiscsi_pci_id_table
3895 };
3896
3897
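/*
 * Module entry point: register the iSCSI transport first, then the PCI
 * driver.  If PCI registration fails, the transport is unregistered again.
 */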
3898 static int __init beiscsi_module_init(void)
3899 {
3900         int ret;
3901
3902         beiscsi_scsi_transport =
3903                         iscsi_register_transport(&beiscsi_iscsi_transport);
3904         if (!beiscsi_scsi_transport) {
3905                 SE_DEBUG(DBG_LVL_1,
3906                          "beiscsi_module_init - Unable to register beiscsi "
3907                          "transport.\n");
3908                 return -ENOMEM;
3909         }
3910         SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
3911                  &beiscsi_iscsi_transport);
3912
3913         ret = pci_register_driver(&beiscsi_pci_driver);
3914         if (ret) {
3915                 SE_DEBUG(DBG_LVL_1,
3916                          "beiscsi_module_init - Unable to register "
3917                          "beiscsi pci driver.\n");
3918                 goto unregister_iscsi_transport;
3919         }
3920         ring_mode = 0;
3921         return 0;
3922
3923 unregister_iscsi_transport:
3924         iscsi_unregister_transport(&beiscsi_iscsi_transport);
3925         return ret;
3926 }
3927
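/* Module exit: unregister the PCI driver, then the iSCSI transport. */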
3928 static void __exit beiscsi_module_exit(void)
3929 {
3930         pci_unregister_driver(&beiscsi_pci_driver);
3931         iscsi_unregister_transport(&beiscsi_iscsi_transport);
3932 }
3933
3934 module_init(beiscsi_module_init);
3935 module_exit(beiscsi_module_exit);