/*
*******************************************************************************
**        O.S   : Linux
**   FILE NAME  : arcmsr_hba.c
**        BY    : Erich Chen
**   Description: SCSI RAID Device Driver for
**                ARECA RAID Host adapter
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
**     Web site: www.areca.com.tw
**       E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
**     Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"

MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);

static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
					struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
					void (*done) (struct scsi_cmnd *));
static int arcmsr_probe(struct pci_dev *pdev,
				const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
					  int queue_depth, int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
		queue_depth = ARCMSR_MAX_CMD_PERLUN;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
							ARCMSR_DRIVER_VERSION,
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_MAX_OUTSTANDING_CMD,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_MAX_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS,
	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= arcmsr_host_attrs,
};
#ifdef CONFIG_SCSI_ARCMSR_AER
static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);

static struct pci_error_handlers arcmsr_pci_error_handlers = {
	.error_detected		= arcmsr_pci_error_detected,
	.slot_reset		= arcmsr_pci_slot_reset,
};
#endif
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
static struct pci_driver arcmsr_pci_driver = {
	.name			= "arcmsr",
	.id_table		= arcmsr_device_id_table,
	.probe			= arcmsr_probe,
	.remove			= arcmsr_remove,
	.shutdown		= arcmsr_shutdown,
	#ifdef CONFIG_SCSI_ARCMSR_AER
	.err_handler		= &arcmsr_pci_error_handlers,
	#endif
};

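/*
** Top-level IRQ handler registered with request_irq(): take the host
** lock and hand the interrupt off to the per-adapter handler.
*/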
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
	irqreturn_t handle_state;
	struct AdapterControlBlock *acb = dev_id;

	spin_lock(acb->host->host_lock);
	handle_state = arcmsr_interrupt(acb);
	spin_unlock(acb->host->host_lock);

	return handle_state;
}

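/*
** Report a BIOS-compatible disk geometry: use the on-disk partition table
** if one can be read, otherwise fall back to 64/32 (255/63 for large disks).
*/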
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int ret, heads, sectors, cylinders, total_capacity;
	unsigned char *buffer;/* return copy of block device's partition table */

	buffer = scsi_bios_ptable(bdev);
	if (buffer) {
		ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
		kfree(buffer);
		if (ret != -1)
			return ret;
	}
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}

static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	u16 dev_id;
	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	switch (dev_id) {
	case 0x1201 : {
		acb->adapter_type = ACB_ADAPTER_TYPE_B;
		}
		break;

	default : acb->adapter_type = ACB_ADAPTER_TYPE_A;
	}
}

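/*
** Map the adapter registers and carve the command control blocks (CCBs)
** out of one coherent DMA allocation, aligned to a 32-byte boundary so
** each CCB can be handed to the IOP as a shifted physical address.
*/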
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct pci_dev *pdev = acb->pdev;
		void *dma_coherent;
		dma_addr_t dma_coherent_handle, dma_addr;
		struct CommandControlBlock *ccb_tmp;
		uint32_t intmask_org;
		int i, j;

		acb->pmuA = pci_ioremap_bar(pdev, 0);
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
							acb->host->host_no);
			return -ENOMEM;
		}

		dma_coherent = dma_alloc_coherent(&pdev->dev,
			ARCMSR_MAX_FREECCB_NUM *
			sizeof (struct CommandControlBlock) + 0x20,
			&dma_coherent_handle, GFP_KERNEL);

		if (!dma_coherent) {
			iounmap(acb->pmuA);
			return -ENOMEM;
		}

		acb->dma_coherent = dma_coherent;
		acb->dma_coherent_handle = dma_coherent_handle;

		if (((unsigned long)dma_coherent & 0x1F)) {
			dma_coherent = dma_coherent +
				(0x20 - ((unsigned long)dma_coherent & 0x1F));
			dma_coherent_handle = dma_coherent_handle +
				(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
		}

		dma_addr = dma_coherent_handle;
		ccb_tmp = (struct CommandControlBlock *)dma_coherent;
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
			ccb_tmp->acb = acb;
			acb->pccb_pool[i] = ccb_tmp;
			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
			dma_addr = dma_addr + sizeof(struct CommandControlBlock);
			ccb_tmp++;
		}

		acb->vir2phy_offset = (unsigned long)ccb_tmp - (unsigned long)dma_addr;
		for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
			for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
				acb->devstate[i][j] = ARECA_RAID_GONE;

		/*
		** here we need to tell iop 331 our ccb_tmp.HighPart
		** if ccb_tmp.HighPart is not zero
		*/
		intmask_org = arcmsr_disable_outbound_ints(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {

		struct pci_dev *pdev = acb->pdev;
		struct MessageUnit_B *reg;
		void __iomem *mem_base0, *mem_base1;
		void *dma_coherent;
		dma_addr_t dma_coherent_handle, dma_addr;
		uint32_t intmask_org;
		struct CommandControlBlock *ccb_tmp;
		int i, j;

		dma_coherent = dma_alloc_coherent(&pdev->dev,
			((ARCMSR_MAX_FREECCB_NUM *
			sizeof(struct CommandControlBlock) + 0x20) +
			sizeof(struct MessageUnit_B)),
			&dma_coherent_handle, GFP_KERNEL);
		if (!dma_coherent)
			return -ENOMEM;

		acb->dma_coherent = dma_coherent;
		acb->dma_coherent_handle = dma_coherent_handle;

		if (((unsigned long)dma_coherent & 0x1F)) {
			dma_coherent = dma_coherent +
				(0x20 - ((unsigned long)dma_coherent & 0x1F));
			dma_coherent_handle = dma_coherent_handle +
				(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
		}

		dma_addr = dma_coherent_handle;
		ccb_tmp = (struct CommandControlBlock *)dma_coherent;
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
			ccb_tmp->acb = acb;
			acb->pccb_pool[i] = ccb_tmp;
			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
			dma_addr = dma_addr + sizeof(struct CommandControlBlock);
			ccb_tmp++;
		}

		reg = (struct MessageUnit_B *)(dma_coherent +
			ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
		acb->pmuB = reg;
		mem_base0 = pci_ioremap_bar(pdev, 0);
		if (!mem_base0)
			goto out;

		mem_base1 = pci_ioremap_bar(pdev, 2);
		if (!mem_base1) {
			iounmap(mem_base0);
			goto out;
		}

		reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL;
		reg->drv2iop_doorbell_mask_reg = mem_base0 +
						ARCMSR_DRV2IOP_DOORBELL_MASK;
		reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL;
		reg->iop2drv_doorbell_mask_reg = mem_base0 +
						ARCMSR_IOP2DRV_DOORBELL_MASK;
		reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER;
		reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER;
		reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER;

		acb->vir2phy_offset = (unsigned long)ccb_tmp - (unsigned long)dma_addr;
		for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
			for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
				acb->devstate[i][j] = ARECA_RAID_GOOD;

		/*
		** here we need to tell iop 331 our ccb_tmp.HighPart
		** if ccb_tmp.HighPart is not zero
		*/
		intmask_org = arcmsr_disable_outbound_ints(acb);
		}
		break;
	}
	return 0;

out:
	dma_free_coherent(&acb->pdev->dev,
		(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
		sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
	return -ENOMEM;
}

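/*
** PCI probe: enable the device, allocate the Scsi_Host and adapter control
** block, set the DMA mask, request regions, build the CCB pool, hook the
** interrupt and bring up the IOP before registering and scanning the host.
*/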
static int arcmsr_probe(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus, dev_fun;
	int error;

	error = pci_enable_device(pdev);
	if (error)
		goto out;
	pci_set_master(pdev);

	host = scsi_host_alloc(&arcmsr_scsi_host_template,
			sizeof(struct AdapterControlBlock));
	if (!host) {
		error = -ENOMEM;
		goto out_disable_device;
	}
	acb = (struct AdapterControlBlock *)host->hostdata;
	memset(acb, 0, sizeof (struct AdapterControlBlock));

	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (error) {
		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (error) {
			printk(KERN_WARNING
			       "scsi%d: No suitable DMA mask available\n",
			       host->host_no);
			goto out_host_put;
		}
	}
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb->host = host;
	acb->pdev = pdev;
	host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
	host->max_cmd_len = 16;    /*this is issue of 64bit LBA, over 2T byte*/
	host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
	host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
	host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	host->irq = pdev->irq;
	error = pci_request_regions(pdev, "arcmsr");
	if (error) {
		goto out_host_put;
	}
	arcmsr_define_adapter_type(acb);

	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			   ACB_F_MESSAGE_RQBUFFER_CLEARED |
			   ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);

	error = arcmsr_alloc_ccb_pool(acb);
	if (error)
		goto out_release_regions;

	error = request_irq(pdev->irq, arcmsr_do_interrupt,
			    IRQF_SHARED, "arcmsr", acb);
	if (error)
		goto out_free_ccb_pool;

	arcmsr_iop_init(acb);
	pci_set_drvdata(pdev, host);
	if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
		host->max_sectors = ARCMSR_MAX_XFER_SECTORS_B;

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto out_free_irq;

	error = arcmsr_alloc_sysfs_attr(acb);
	if (error)
		goto out_free_sysfs;

	scsi_scan_host(host);
	#ifdef CONFIG_SCSI_ARCMSR_AER
	pci_enable_pcie_error_reporting(pdev);
	#endif
	return 0;
 out_free_sysfs:
 out_free_irq:
	free_irq(pdev->irq, acb);
 out_free_ccb_pool:
	arcmsr_free_ccb_pool(acb);
 out_release_regions:
	pci_release_regions(pdev);
 out_host_put:
	scsi_host_put(host);
 out_disable_device:
	pci_disable_device(pdev);
 out:
	return error;
}

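/*
** Poll for up to about 20 seconds for the type A adapter to acknowledge a
** message interrupt; returns 0 on success, 0xff on timeout.
*/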
static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	uint32_t Index;
	uint8_t Retries = 0x00;

	do {
		for (Index = 0; Index < 100; Index++) {
			if (readl(&reg->outbound_intstatus) &
					ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
				writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
					&reg->outbound_intstatus);
				return 0x00;
			}
			msleep(10);
		}/*max 1 seconds*/

	} while (Retries++ < 20);/*max 20 sec*/
	return 0xff;
}

static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	uint32_t Index;
	uint8_t Retries = 0x00;

	do {
		for (Index = 0; Index < 100; Index++) {
			if (readl(reg->iop2drv_doorbell_reg)
				& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
				writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
					, reg->iop2drv_doorbell_reg);
				writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
				return 0x00;
			}
			msleep(10);
		}/*max 1 seconds*/

	} while (Retries++ < 20);/*max 20 sec*/
	return 0xff;
}

static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	if (arcmsr_hba_wait_msgint_ready(acb))
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
			, acb->host->host_no);
}

static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
	if (arcmsr_hbb_wait_msgint_ready(acb))
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
			, acb->host->host_no);
}

static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		arcmsr_abort_hba_allcmd(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		arcmsr_abort_hbb_allcmd(acb);
		}
	}
}

static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;

	scsi_dma_unmap(pcmd);
}

static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;

	arcmsr_pci_unmap_dma(ccb);
	if (stand_flag == 1)
		atomic_dec(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_DONE;
	ccb->ccb_flags = 0;
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	pcmd->scsi_done(pcmd);
}

static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int retry_count = 30;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	do {
		if (!arcmsr_hba_wait_msgint_ready(acb))
			break;
		else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
				"timeout, retry count down = %d \n",
				acb->host->host_no, retry_count);
		}
	} while (retry_count != 0);
}

static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int retry_count = 30;

	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
	do {
		if (!arcmsr_hbb_wait_msgint_ready(acb))
			break;
		else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
				"timeout, retry count down = %d \n",
				acb->host->host_no, retry_count);
		}
	} while (retry_count != 0);
}

static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		arcmsr_flush_hba_cache(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		arcmsr_flush_hbb_cache(acb);
		}
	}
}

static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{

	struct scsi_cmnd *pcmd = ccb->pcmd;
	struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;

	pcmd->result = DID_OK << 16;
	if (sensebuffer) {
		int sense_data_length =
			sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
			? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
		memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
		memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
		sensebuffer->Valid = 1;
	}
}

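/*
** Mask the adapter's outbound interrupts and return the previous mask so
** it can later be restored by arcmsr_enable_outbound_ints().
*/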
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	u32 orig_mask = 0;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A : {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		orig_mask = readl(&reg->outbound_intmask) |
				ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
		writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
						&reg->outbound_intmask);
		}
		break;

	case ACB_ADAPTER_TYPE_B : {
		struct MessageUnit_B *reg = acb->pmuB;
		orig_mask = readl(reg->iop2drv_doorbell_mask_reg) &
					(~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(0, reg->iop2drv_doorbell_mask_reg);
		}
		break;
	}
	return orig_mask;
}

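/*
** Translate the IOP completion status of a CCB into a SCSI midlayer
** result code and finish the command.
*/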
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
			struct CommandControlBlock *ccb, uint32_t flag_ccb)
{

	uint8_t id, lun;
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb, 1);
	} else {
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb, 1);
			}
			break;

		case ARCMSR_DEV_ABORTED:

		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb, 1);
			}
			break;

		case ARCMSR_DEV_CHECK_CONDITION: {
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb, 1);
			}
			break;

		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d"
				" isr get command error done, "
				"but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb, 1);
			break;
		}
	}
}

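/*
** Convert a completion queue entry (a shifted physical address) back into
** its CCB and complete it, flagging aborted or otherwise stale CCBs.
*/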
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb)

{
	struct CommandControlBlock *ccb;

	ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
	if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
		if (ccb->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = ccb->pcmd;
			if (abortcmd) {
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				printk(KERN_NOTICE "arcmsr%d: ccb ='0x%p' "
					"isr got aborted command \n",
					acb->host->host_no, ccb);
			}
		}
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command "
				"done acb = '0x%p'"
				"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
				" ccboutstandingcount = %d \n"
				, acb->host->host_no
				, acb
				, ccb
				, ccb->acb
				, ccb->startdone
				, atomic_read(&acb->ccboutstandingcount));
	} else
		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
}

static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(&reg->outbound_intstatus) &
					acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
		while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
				&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			arcmsr_drain_donequeue(acb, flag_ccb);
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear all outbound posted Q*/
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
				writel(0, &reg->done_qbuffer[i]);
				arcmsr_drain_donequeue(acb, flag_ccb);
			}
			writel(0, &reg->post_qbuffer[i]);
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	}
}
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;

	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_disable_outbound_ints(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}

	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
	}

	free_irq(pdev->irq, acb);
	arcmsr_free_ccb_pool(acb);
	pci_release_regions(pdev);

	scsi_host_put(host);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}

static int arcmsr_module_init(void)
{
	int error = 0;

	error = pci_register_driver(&arcmsr_pci_driver);
	return error;
}

static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);

static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
						u32 intmask_org)
{
	u32 mask;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A : {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			     ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;

	case ACB_ADAPTER_TYPE_B : {
		struct MessageUnit_B *reg = acb->pmuB;
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
		writel(mask, reg->iop2drv_doorbell_mask_reg);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
	}
}

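/*
** Fill an ARCMSR_CDB from the SCSI command: copy the CDB bytes and map the
** scatter-gather list into 32- or 64-bit IOP SG entries.
*/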
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;
	int nseg;

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->Bus = 0;
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
	arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	if (nseg > ARCMSR_MAX_SG_ENTRIES)
		return FAILED;
	BUG_ON(nseg < 0);

	if (nseg) {
		__le32 length;
		int i, cdb_sgcount = 0;
		struct scatterlist *sg;

		/* map stor port SG list to our iop SG List. */
		scsi_for_each_sg(pcmd, sg, nseg, i) {
			/* Get the physical address of the current data pointer */
			length = cpu_to_le32(sg_dma_len(sg));
			address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
			address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
			if (address_hi == 0) {
				struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

				pdma_sg->address = address_lo;
				pdma_sg->length = length;
				psge += sizeof (struct SG32ENTRY);
				arccdbsize += sizeof (struct SG32ENTRY);
			} else {
				struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

				pdma_sg->addresshigh = address_hi;
				pdma_sg->address = address_lo;
				pdma_sg->length = length | cpu_to_le32(IS_SG64_ADDR);
				psge += sizeof (struct SG64ENTRY);
				arccdbsize += sizeof (struct SG64ENTRY);
			}
			cdb_sgcount++;
		}
		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
		arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
		if (arccdbsize > 256)
			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	}
	if (pcmd->sc_data_direction == DMA_TO_DEVICE) {
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
		ccb->ccb_flags |= CCB_FLAG_WRITE;
	}
	return SUCCESS;
}

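/*
** Hand a built CCB to the IOP: type A posts the shifted physical address to
** the inbound queue port, type B writes it into the post queue buffer and
** rings the drv2iop doorbell.
*/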
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->inbound_queueport);
		else
			writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		writel(0, &reg->post_qbuffer[ending_index]);
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->post_qbuffer[index]);
		else
			writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
		}
		break;
	}
}

static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);

	if (arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
			, acb->host->host_no);
	}
}

static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);

	if (arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
			, acb->host->host_no);
	}
}

static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		arcmsr_stop_hba_bgrb(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		arcmsr_stop_hbb_bgrb(acb);
		}
		break;
	}
}

static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		iounmap(acb->pmuA);
		dma_free_coherent(&acb->pdev->dev,
			ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
			acb->dma_coherent,
			acb->dma_coherent_handle);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
		iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
		dma_free_coherent(&acb->pdev->dev,
			(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
			sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
	}
	}

}

void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
		}
		break;
	}
}

static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell_reg);
		}
		break;
	}
}

struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *qbuffer = NULL;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		qbuffer = (struct QBUFFER __iomem *)reg->ioctl_rbuffer_reg;
		}
		break;
	}
	return qbuffer;
}

static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *pqbuffer = NULL;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B  *reg = acb->pmuB;
		pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg;
		}
		break;
	}
	return pqbuffer;
}

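/*
** The IOP has placed message data in its write buffer; copy it into the
** driver's ring buffer if there is room, otherwise flag an overflow.
*/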
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *prbuffer;
	struct QBUFFER *pQbuffer;
	uint8_t __iomem *iop_data;
	int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;

	rqbuf_lastindex = acb->rqbuf_lastindex;
	rqbuf_firstindex = acb->rqbuf_firstindex;
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	iop_len = prbuffer->data_len;
	my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);

	if (my_empty_len >= iop_len) {
		while (iop_len > 0) {
			pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
			memcpy(pQbuffer, iop_data, 1);
			rqbuf_lastindex++;
			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			iop_len--;
		}
		acb->rqbuf_lastindex = rqbuf_lastindex;
		arcmsr_iop_message_read(acb);
	} else {
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	}
}

static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
		uint8_t *pQbuffer;
		struct QBUFFER __iomem *pwbuffer;
		uint8_t __iomem *iop_data;
		int32_t allxfer_len = 0;

		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;

		while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) &&
							(allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
			memcpy(iop_data, pQbuffer, 1);
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		pwbuffer->data_len = allxfer_len;

		arcmsr_iop_message_wrote(acb);
	}

	if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	}
}

static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	outbound_doorbell = readl(&reg->outbound_doorbell);
	writel(outbound_doorbell, &reg->outbound_doorbell);
	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}

	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
}

static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t flag_ccb;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		arcmsr_drain_donequeue(acb, flag_ccb);
	}
}

static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;

	index = reg->doneq_index;

	while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
		writel(0, &reg->done_qbuffer[index]);
		arcmsr_drain_donequeue(acb, flag_ccb);
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
	}
}

static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_intstatus;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	outbound_intstatus = readl(&reg->outbound_intstatus) &
						acb->outbound_int_enable;
	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
		return 1;
	}
	writel(outbound_intstatus, &reg->outbound_intstatus);
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
		arcmsr_hba_doorbell_isr(acb);
	}
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
		arcmsr_hba_postqueue_isr(acb);
	}
	return 0;
}

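/*
** Interrupt service for type B adapters: acknowledge the iop2drv doorbell
** and dispatch the data-write, data-read and CDB-done events.
*/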
1273 static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1274 {
1275         uint32_t outbound_doorbell;
1276         struct MessageUnit_B *reg = acb->pmuB;
1277
1278         outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \
1279                                                         acb->outbound_int_enable;
1280         if (!outbound_doorbell)
1281                 return 1;
1282
1283         writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
1284         /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/
1285         readl(reg->iop2drv_doorbell_reg);
1286         writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
1287         if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)   {
1288                 arcmsr_iop2drv_data_wrote_handle(acb);
1289         }
1290         if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1291                 arcmsr_iop2drv_data_read_handle(acb);
1292         }
1293         if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1294                 arcmsr_hbb_postqueue_isr(acb);
1295         }
1296
1297         return 0;
1298 }
1299
1300 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
1301 {
1302         switch (acb->adapter_type) {
1303         case ACB_ADAPTER_TYPE_A: {
1304                 if (arcmsr_handle_hba_isr(acb)) {
1305                         return IRQ_NONE;
1306                 }
1307                 }
1308                 break;
1309
1310         case ACB_ADAPTER_TYPE_B: {
1311                 if (arcmsr_handle_hbb_isr(acb)) {
1312                         return IRQ_NONE;
1313                 }
1314                 }
1315                 break;
1316         }
1317         return IRQ_HANDLED;
1318 }
1319
1320 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1321 {
1322         if (acb) {
1323                 /* stop adapter background rebuild */
1324                 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
1325                         uint32_t intmask_org;
1326                         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1327                         intmask_org = arcmsr_disable_outbound_ints(acb);
1328                         arcmsr_stop_adapter_bgrb(acb);
1329                         arcmsr_flush_adapter_cache(acb);
1330                         arcmsr_enable_outbound_ints(acb, intmask_org);
1331                 }
1332         }
1333 }
1334
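/*
** arcmsr_post_ioctldata2iop: copy pending ioctl data from the driver's
** circular write queue (wqbuffer) into the IOP's write buffer, at most
** 124 bytes per message, then signal the IOP that a message was written.
** Only runs once the IOP has read the previous message
** (ACB_F_MESSAGE_WQBUFFER_READED set).
*/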
1335 void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1336 {
1337         int32_t wqbuf_firstindex, wqbuf_lastindex;
1338         uint8_t *pQbuffer;
1339         struct QBUFFER __iomem *pwbuffer;
1340         uint8_t __iomem *iop_data;
1341         int32_t allxfer_len = 0;
1342
1343         pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1344         iop_data = (uint8_t __iomem *)pwbuffer->data;
1345         if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
1346                 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1347                 wqbuf_firstindex = acb->wqbuf_firstindex;
1348                 wqbuf_lastindex = acb->wqbuf_lastindex;
1349                 while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
1350                         pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
1351                         memcpy(iop_data, pQbuffer, 1);
1352                         wqbuf_firstindex++;
1353                         wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1354                         iop_data++;
1355                         allxfer_len++;
1356                 }
1357                 acb->wqbuf_firstindex = wqbuf_firstindex;
1358                 pwbuffer->data_len = allxfer_len;
1359                 arcmsr_iop_message_wrote(acb);
1360         }
1361 }
1362
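/*
** arcmsr_iop_message_xfer: handle the Areca ioctl pass-through carried by
** READ_BUFFER/WRITE_BUFFER commands.  The 32-bit control code comes from
** CDB bytes 5..8; the payload must live in a single scatter-gather element
** no larger than struct CMD_MESSAGE_FIELD.  Returns ARCMSR_MESSAGE_FAIL on
** any violation or unknown control code.
*/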
1363 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1364                                         struct scsi_cmnd *cmd)
1365 {
1366         struct CMD_MESSAGE_FIELD *pcmdmessagefld;
1367         int retvalue = 0, transfer_len = 0;
1368         char *buffer;
1369         struct scatterlist *sg;
1370         uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
1371                                                 (uint32_t ) cmd->cmnd[6] << 16 |
1372                                                 (uint32_t ) cmd->cmnd[7] << 8  |
1373                                                 (uint32_t ) cmd->cmnd[8];
1374                                                 /* 4 bytes: Areca io control code */
1375
1376         sg = scsi_sglist(cmd);
1377         buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
1378         if (scsi_sg_count(cmd) > 1) {
1379                 retvalue = ARCMSR_MESSAGE_FAIL;
1380                 goto message_out;
1381         }
1382         transfer_len += sg->length;
1383
1384         if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
1385                 retvalue = ARCMSR_MESSAGE_FAIL;
1386                 goto message_out;
1387         }
1388         pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
1389         switch(controlcode) {
1390
1391         case ARCMSR_MESSAGE_READ_RQBUFFER: {
1392                 unsigned char *ver_addr;
1393                 uint8_t *pQbuffer, *ptmpQbuffer;
1394                 int32_t allxfer_len = 0;
1395
1396                 ver_addr = kmalloc(1032, GFP_ATOMIC);
1397                 if (!ver_addr) {
1398                         retvalue = ARCMSR_MESSAGE_FAIL;
1399                         goto message_out;
1400                 }
1401                 ptmpQbuffer = ver_addr;
1402                 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1403                         && (allxfer_len < 1031)) {
1404                         pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
1405                         memcpy(ptmpQbuffer, pQbuffer, 1);
1406                         acb->rqbuf_firstindex++;
1407                         acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1408                         ptmpQbuffer++;
1409                         allxfer_len++;
1410                 }
1411                 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1412
1413                         struct QBUFFER __iomem *prbuffer;
1414                         uint8_t __iomem *iop_data;
1415                         int32_t iop_len;
1416
1417                         acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1418                         prbuffer = arcmsr_get_iop_rqbuffer(acb);
1419                         iop_data = prbuffer->data;
1420                         iop_len = readl(&prbuffer->data_len);
1421                         while (iop_len > 0) {
1422                                 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
1423                                 acb->rqbuf_lastindex++;
1424                                 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1425                                 iop_data++;
1426                                 iop_len--;
1427                         }
1428                         arcmsr_iop_message_read(acb);
1429                 }
1430                 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
1431                 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1432                 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1433                 kfree(ver_addr);
1434                 }
1435                 break;
1436
1437         case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1438                 unsigned char *ver_addr;
1439                 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1440                 uint8_t *pQbuffer, *ptmpuserbuffer;
1441
1442                 ver_addr = kmalloc(1032, GFP_ATOMIC);
1443                 if (!ver_addr) {
1444                         retvalue = ARCMSR_MESSAGE_FAIL;
1445                         goto message_out;
1446                 }
1447                 ptmpuserbuffer = ver_addr;
1448                 user_len = pcmdmessagefld->cmdmessage.Length;
1449                 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
1450                 wqbuf_lastindex = acb->wqbuf_lastindex;
1451                 wqbuf_firstindex = acb->wqbuf_firstindex;
1452                 if (wqbuf_lastindex != wqbuf_firstindex) {
1453                         struct SENSE_DATA *sensebuffer =
1454                                 (struct SENSE_DATA *)cmd->sense_buffer;
1455                         arcmsr_post_ioctldata2iop(acb);
1456                         /* write queue not empty: flush it and report error via sense data */
1457                         sensebuffer->ErrorCode = 0x70;
1458                         sensebuffer->SenseKey = ILLEGAL_REQUEST;
1459                         sensebuffer->AdditionalSenseLength = 0x0A;
1460                         sensebuffer->AdditionalSenseCode = 0x20;
1461                         sensebuffer->Valid = 1;
1462                         retvalue = ARCMSR_MESSAGE_FAIL;
1463                 } else {
1464                         my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1)
1465                                 & (ARCMSR_MAX_QBUFFER - 1);
1466                         if (my_empty_len >= user_len) {
1467                                 while (user_len > 0) {
1468                                         pQbuffer =
1469                                         &acb->wqbuffer[acb->wqbuf_lastindex];
1470                                         memcpy(pQbuffer, ptmpuserbuffer, 1);
1471                                         acb->wqbuf_lastindex++;
1472                                         acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1473                                         ptmpuserbuffer++;
1474                                         user_len--;
1475                                 }
1476                                 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1477                                         acb->acb_flags &=
1478                                                 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1479                                         arcmsr_post_ioctldata2iop(acb);
1480                                 }
1481                         } else {
1482                                 /* not enough free space: report error via sense data */
1483                                 struct SENSE_DATA *sensebuffer =
1484                                         (struct SENSE_DATA *)cmd->sense_buffer;
1485                                 sensebuffer->ErrorCode = 0x70;
1486                                 sensebuffer->SenseKey = ILLEGAL_REQUEST;
1487                                 sensebuffer->AdditionalSenseLength = 0x0A;
1488                                 sensebuffer->AdditionalSenseCode = 0x20;
1489                                 sensebuffer->Valid = 1;
1490                                 retvalue = ARCMSR_MESSAGE_FAIL;
1491                         }
1492                 }
1493                 kfree(ver_addr);
1494                 }
1495                 break;
1496
1497         case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1498                 uint8_t *pQbuffer = acb->rqbuffer;
1499
1500                 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1501                         acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1502                         arcmsr_iop_message_read(acb);
1503                 }
1504                 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1505                 acb->rqbuf_firstindex = 0;
1506                 acb->rqbuf_lastindex = 0;
1507                 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1508                 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1509                 }
1510                 break;
1511
1512         case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
1513                 uint8_t *pQbuffer = acb->wqbuffer;
1514
1515                 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1516                         acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1517                         arcmsr_iop_message_read(acb);
1518                 }
1519                 acb->acb_flags |=
1520                         (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1521                                 ACB_F_MESSAGE_WQBUFFER_READED);
1522                 acb->wqbuf_firstindex = 0;
1523                 acb->wqbuf_lastindex = 0;
1524                 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1525                 pcmdmessagefld->cmdmessage.ReturnCode =
1526                         ARCMSR_MESSAGE_RETURNCODE_OK;
1527                 }
1528                 break;
1529
1530         case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1531                 uint8_t *pQbuffer;
1532
1533                 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1534                         acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1535                         arcmsr_iop_message_read(acb);
1536                 }
1537                 acb->acb_flags |=
1538                         (ACB_F_MESSAGE_WQBUFFER_CLEARED
1539                         | ACB_F_MESSAGE_RQBUFFER_CLEARED
1540                         | ACB_F_MESSAGE_WQBUFFER_READED);
1541                 acb->rqbuf_firstindex = 0;
1542                 acb->rqbuf_lastindex = 0;
1543                 acb->wqbuf_firstindex = 0;
1544                 acb->wqbuf_lastindex = 0;
1545                 pQbuffer = acb->rqbuffer;
1546                 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1547                 pQbuffer = acb->wqbuffer;
1548                 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1549                 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1550                 }
1551                 break;
1552
1553         case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1554                 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1555                 }
1556                 break;
1557
1558         case ARCMSR_MESSAGE_SAY_HELLO: {
1559                 char *hello_string = "Hello! I am ARCMSR";
1560
1561                 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1562                         , (int16_t)strlen(hello_string));
1563                 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1564                 }
1565                 break;
1566
1567         case ARCMSR_MESSAGE_SAY_GOODBYE:
1568                 arcmsr_iop_parking(acb);
1569                 break;
1570
1571         case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1572                 arcmsr_flush_adapter_cache(acb);
1573                 break;
1574
1575         default:
1576                 retvalue = ARCMSR_MESSAGE_FAIL;
1577         }
1578         message_out:
1579         sg = scsi_sglist(cmd);
1580         kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1581         return retvalue;
1582 }
1583
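/* take a command control block from the head of the free list, or NULL if none left */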
1584 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
1585 {
1586         struct list_head *head = &acb->ccb_free_list;
1587         struct CommandControlBlock *ccb = NULL;
1588
1589         if (!list_empty(head)) {
1590                 ccb = list_entry(head->next, struct CommandControlBlock, list);
1591                 list_del(head->next);
1592         }
1593         return ccb;
1594 }
1595
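/*
** arcmsr_handle_virtual_command: commands addressed to the virtual device
** (target 16) never reach a disk.  INQUIRY is answered in software with a
** processor-type device, and READ_BUFFER/WRITE_BUFFER carry the Areca
** ioctl messages handled by arcmsr_iop_message_xfer().
*/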
1596 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1597                 struct scsi_cmnd *cmd)
1598 {
1599         switch (cmd->cmnd[0]) {
1600         case INQUIRY: {
1601                 unsigned char inqdata[36] = {0};
1602                 char *buffer;
1603                 struct scatterlist *sg;
1604
1605                 if (cmd->device->lun) {
1606                         cmd->result = (DID_TIME_OUT << 16);
1607                         cmd->scsi_done(cmd);
1608                         return;
1609                 }
1610                 inqdata[0] = TYPE_PROCESSOR;
1611                 /* Periph Qualifier & Periph Dev Type */
1612                 inqdata[1] = 0;
1613                 /* rem media bit & Dev Type Modifier */
1614                 inqdata[2] = 0;
1615                 /* ISO, ECMA, & ANSI versions */
1616                 inqdata[4] = 31;
1617                 /* length of additional data */
1618                 strncpy(&inqdata[8], "Areca   ", 8);
1619                 /* Vendor Identification */
1620                 strncpy(&inqdata[16], "RAID controller ", 16);
1621                 /* Product Identification */
1622                 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1623
1624                 sg = scsi_sglist(cmd);
1625                 buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
1626
1627                 memcpy(buffer, inqdata, sizeof(inqdata));
1628                 sg = scsi_sglist(cmd);
1629                 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1630
1631                 cmd->scsi_done(cmd);
1632         }
1633         break;
1634         case WRITE_BUFFER:
1635         case READ_BUFFER: {
1636                 if (arcmsr_iop_message_xfer(acb, cmd))
1637                         cmd->result = (DID_ERROR << 16);
1638                 cmd->scsi_done(cmd);
1639         }
1640         break;
1641         default:
1642                 cmd->scsi_done(cmd);
1643         }
1644 }
1645
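/*
** arcmsr_queue_command: SCSI mid-layer entry point.  Returns busy while a
** bus reset is in flight, routes target 16 to the virtual-device handler,
** fails block reads/writes to volumes the firmware reported gone, and
** otherwise builds a CCB and posts it to the adapter.
*/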
1646 static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1647         void (* done)(struct scsi_cmnd *))
1648 {
1649         struct Scsi_Host *host = cmd->device->host;
1650         struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
1651         struct CommandControlBlock *ccb;
1652         int target = cmd->device->id;
1653         int lun = cmd->device->lun;
1654
1655         cmd->scsi_done = done;
1656         cmd->host_scribble = NULL;
1657         cmd->result = 0;
1658         if (acb->acb_flags & ACB_F_BUS_RESET) {
1659                 printk(KERN_NOTICE "arcmsr%d: bus reset in progress,"
1660                         " returning busy\n"
1661                         , acb->host->host_no);
1662                 return SCSI_MLQUEUE_HOST_BUSY;
1663         }
1664         if (target == 16) {
1665                 /* virtual device for iop message transfer */
1666                 arcmsr_handle_virtual_command(acb, cmd);
1667                 return 0;
1668         }
1669         if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1670                 uint8_t block_cmd;
1671
1672                 block_cmd = cmd->cmnd[0] & 0x0f;
1673                 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1674                         printk(KERN_NOTICE
1675                                 "arcmsr%d: block 'read/write'"
1676                                 "command with gone raid volume"
1677                                 " Cmd = %2x, TargetId = %d, Lun = %d \n"
1678                                 , acb->host->host_no
1679                                 , cmd->cmnd[0]
1680                                 , target, lun);
1681                         cmd->result = (DID_NO_CONNECT << 16);
1682                         cmd->scsi_done(cmd);
1683                         return 0;
1684                 }
1685         }
1686         if (atomic_read(&acb->ccboutstandingcount) >=
1687                         ARCMSR_MAX_OUTSTANDING_CMD)
1688                 return SCSI_MLQUEUE_HOST_BUSY;
1689
1690         ccb = arcmsr_get_freeccb(acb);
1691         if (!ccb)
1692                 return SCSI_MLQUEUE_HOST_BUSY;
1693         if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
1694                 cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
1695                 cmd->scsi_done(cmd);
1696                 return 0;
1697         }
1698         arcmsr_post_ccb(acb, ccb);
1699         return 0;
1700 }
1701
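/*
** arcmsr_get_hba_config: send the 'get config' message to a type A adapter
** and copy the firmware model/version strings plus the request length,
** queue depth, SDRAM size and channel count out of message_rwbuffer.
*/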
1702 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
1703 {
1704         struct MessageUnit_A __iomem *reg = acb->pmuA;
1705         char *acb_firm_model = acb->firm_model;
1706         char *acb_firm_version = acb->firm_version;
1707         char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
1708         char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
1709         int count;
1710
1711         writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1712         if (arcmsr_hba_wait_msgint_ready(acb)) {
1713                 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
1714                         "miscellaneous data' timeout\n", acb->host->host_no);
1715         }
1716
1717         count = 8;
1718         while (count) {
1719                 *acb_firm_model = readb(iop_firm_model);
1720                 acb_firm_model++;
1721                 iop_firm_model++;
1722                 count--;
1723         }
1724
1725         count = 16;
1726         while (count) {
1727                 *acb_firm_version = readb(iop_firm_version);
1728                 acb_firm_version++;
1729                 iop_firm_version++;
1730                 count--;
1731         }
1732
1733         printk(KERN_INFO        "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
1734                 , acb->host->host_no
1735                 , acb->firm_version);
1736
1737         acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1738         acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1739         acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1740         acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1741 }
1742
1743 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
1744 {
1745         struct MessageUnit_B *reg = acb->pmuB;
1746         uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
1747         char *acb_firm_model = acb->firm_model;
1748         char *acb_firm_version = acb->firm_version;
1749         char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
1750         /*firm_model,15,60-67*/
1751         char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
1752         /*firm_version,17,68-83*/
1753         int count;
1754
1755         writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
1756         if (arcmsr_hbb_wait_msgint_ready(acb)) {
1757                 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
1758                         "miscellaneous data' timeout\n", acb->host->host_no);
1759         }
1760
1761         count = 8;
1762         while (count)
1763         {
1764                 *acb_firm_model = readb(iop_firm_model);
1765                 acb_firm_model++;
1766                 iop_firm_model++;
1767                 count--;
1768         }
1769
1770         count = 16;
1771         while (count)
1772         {
1773                 *acb_firm_version = readb(iop_firm_version);
1774                 acb_firm_version++;
1775                 iop_firm_version++;
1776                 count--;
1777         }
1778
1779         printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
1780                         acb->host->host_no,
1781                         acb->firm_version);
1782
1783         lrwbuffer++;
1784         acb->firm_request_len = readl(lrwbuffer++);
1785         /*firm_request_len,1,04-07*/
1786         acb->firm_numbers_queue = readl(lrwbuffer++);
1787         /*firm_numbers_queue,2,08-11*/
1788         acb->firm_sdram_size = readl(lrwbuffer++);
1789         /*firm_sdram_size,3,12-15*/
1790         acb->firm_hd_channels = readl(lrwbuffer);
1791         /*firm_ide_channels,4,16-19*/
1792 }
1793
1794 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
1795 {
1796         switch (acb->adapter_type) {
1797         case ACB_ADAPTER_TYPE_A: {
1798                 arcmsr_get_hba_config(acb);
1799                 }
1800                 break;
1801
1802         case ACB_ADAPTER_TYPE_B: {
1803                 arcmsr_get_hbb_config(acb);
1804                 }
1805                 break;
1806         }
1807 }
1808
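/*
** arcmsr_polling_hba_ccbdone: drain the type A outbound queue by polling,
** used on the abort path while outbound interrupts are disabled.  Gives up
** after roughly 100 * 25 ms if poll_ccb has not completed.
*/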
1809 static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
1810         struct CommandControlBlock *poll_ccb)
1811 {
1812         struct MessageUnit_A __iomem *reg = acb->pmuA;
1813         struct CommandControlBlock *ccb;
1814         uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
1815
1816         polling_hba_ccb_retry:
1817         poll_count++;
1818         outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
1819         writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
1820         while (1) {
1821                 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
1822                         if (poll_ccb_done)
1823                                 break;
1824                         else {
1825                                 msleep(25);
1826                                 if (poll_count > 100)
1827                                         break;
1828                                 goto polling_hba_ccb_retry;
1829                         }
1830                 }
1831                 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
1832                 poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
1833                 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
1834                         if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
1835                                 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
1836                                         " poll command abort successfully \n"
1837                                         , acb->host->host_no
1838                                         , ccb->pcmd->device->id
1839                                         , ccb->pcmd->device->lun
1840                                         , ccb);
1841                                 ccb->pcmd->result = DID_ABORT << 16;
1842                                 arcmsr_ccb_complete(ccb, 1);
1843                                 poll_ccb_done = 1;
1844                                 continue;
1845                         }
1846                         printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
1847                                 " command done ccb = '0x%p'"
1848                                 "ccboutstandingcount = %d \n"
1849                                 , acb->host->host_no
1850                                 , ccb
1851                                 , atomic_read(&acb->ccboutstandingcount));
1852                         continue;
1853                 }
1854                 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
1855         }
1856 }
1857
1858 static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
1859                                         struct CommandControlBlock *poll_ccb)
1860 {
1861         struct MessageUnit_B *reg = acb->pmuB;
1862         struct CommandControlBlock *ccb;
1863         uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
1864         int index;
1865
1866         polling_hbb_ccb_retry:
1867         poll_count++;
1868         /* clear doorbell interrupt */
1869         writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
1870         while (1) {
1871                 index = reg->doneq_index;
1872                 if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
1873                         if (poll_ccb_done)
1874                                 break;
1875                         else {
1876                                 msleep(25);
1877                                 if (poll_count > 100)
1878                                         break;
1879                                 goto polling_hbb_ccb_retry;
1880                         }
1881                 }
1882                 writel(0, &reg->done_qbuffer[index]);
1883                 index++;
1884                 /* wrap back to 0 past the last index */
1885                 index %= ARCMSR_MAX_HBB_POSTQUEUE;
1886                 reg->doneq_index = index;
1887                 /* check if command done with no error */
1888                 ccb = (struct CommandControlBlock *)
1889                         (acb->vir2phy_offset + (flag_ccb << 5));/* frame must be 32 bytes aligned */
1890                 poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
1891                 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
1892                         if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
1893                                 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d"
1894                                         " ccb = '0x%p' poll command abort successfully\n"
1895                                         , acb->host->host_no
1896                                         , ccb->pcmd->device->id
1897                                         , ccb->pcmd->device->lun
1898                                         , ccb);
1899                                 ccb->pcmd->result = DID_ABORT << 16;
1900                                 arcmsr_ccb_complete(ccb, 1);
1901                                 continue;
1902                         }
1903                         printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
1904                                 " command done ccb = '0x%p'"
1905                                 " ccboutstandingcount = %d\n"
1906                                 , acb->host->host_no
1907                                 , ccb
1908                                 , atomic_read(&acb->ccboutstandingcount));
1909                         continue;
1910                 }
1911                 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
1912         }       /* drain reply FIFO */
1913 }
1914
1915 static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1916                                         struct CommandControlBlock *poll_ccb)
1917 {
1918         switch (acb->adapter_type) {
1919
1920         case ACB_ADAPTER_TYPE_A: {
1921                 arcmsr_polling_hba_ccbdone(acb, poll_ccb);
1922                 }
1923                 break;
1924
1925         case ACB_ADAPTER_TYPE_B: {
1926                 arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
1927                 }
1928         }
1929 }
1930
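/*
** arcmsr_iop_confirm: hand the IOP the information it needs before I/O
** starts: for type A only the upper 32 bits of the CCB DMA address (when
** non-zero), for type B additionally the post/done command queue window
** inside the coherent DMA area.
*/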
1931 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
1932 {
1933         uint32_t cdb_phyaddr, ccb_phyaddr_hi32;
1934         dma_addr_t dma_coherent_handle;
1935         /*
1936         ********************************************************************
1937         ** here we need to tell iop 331 our freeccb.HighPart
1938         ** if freeccb.HighPart is not zero
1939         ********************************************************************
1940         */
1941         dma_coherent_handle = acb->dma_coherent_handle;
1942         cdb_phyaddr = (uint32_t)(dma_coherent_handle);
1943         ccb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
1944         /*
1945         ***********************************************************************
1946         **    if adapter type B, set window of "post command Q"
1947         ***********************************************************************
1948         */
1949         switch (acb->adapter_type) {
1950
1951         case ACB_ADAPTER_TYPE_A: {
1952                 if (ccb_phyaddr_hi32 != 0) {
1953                         struct MessageUnit_A __iomem *reg = acb->pmuA;
1954                         uint32_t intmask_org;
1955                         intmask_org = arcmsr_disable_outbound_ints(acb);
1956                         writel(ARCMSR_SIGNATURE_SET_CONFIG, \
1957                                                 &reg->message_rwbuffer[0]);
1958                         writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
1959                         writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
1960                                                         &reg->inbound_msgaddr0);
1961                         if (arcmsr_hba_wait_msgint_ready(acb)) {
1962                                 printk(KERN_NOTICE "arcmsr%d: 'set ccb high "
1963                                         "part physical address' timeout\n",
1964                                         acb->host->host_no);
1965                                 return 1;
1966                         }
1967                         arcmsr_enable_outbound_ints(acb, intmask_org);
1968                 }
1969                 }
1970                 break;
1971
1972         case ACB_ADAPTER_TYPE_B: {
1973                 unsigned long post_queue_phyaddr;
1974                 uint32_t __iomem *rwbuffer;
1975
1976                 struct MessageUnit_B *reg = acb->pmuB;
1977                 uint32_t intmask_org;
1978                 intmask_org = arcmsr_disable_outbound_ints(acb);
1979                 reg->postq_index = 0;
1980                 reg->doneq_index = 0;
1981                 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell_reg);
1982                 if (arcmsr_hbb_wait_msgint_ready(acb)) {
1983                         printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n",
1984                                 acb->host->host_no);
1985                         return 1;
1986                 }
1987                 post_queue_phyaddr = cdb_phyaddr + ARCMSR_MAX_FREECCB_NUM *
1988                         sizeof(struct CommandControlBlock) + offsetof(struct MessageUnit_B, post_qbuffer);
1989                 rwbuffer = reg->msgcode_rwbuffer_reg;
1990                 /* driver "set config" signature */
1991                 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
1992                 /* upper 32 bits of the CCB DMA address, normally zero */
1993                 writel(ccb_phyaddr_hi32, rwbuffer++);
1994                 /* post queue base address */
1995                 writel(post_queue_phyaddr, rwbuffer++);
1996                 /* done queue base address: post queue + (256 + 8) * 4 bytes */
1997                 writel(post_queue_phyaddr + 1056, rwbuffer++);
1998                 /* post/done queue size: (256 + 8) * 4 = 1056 bytes */
1999                 writel(1056, rwbuffer);
2000
2001                 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell_reg);
2002                 if (arcmsr_hbb_wait_msgint_ready(acb)) {
2003                         printk(KERN_NOTICE "arcmsr%d: 'set command Q window'"
2004                                 " timeout\n", acb->host->host_no);
2005                         return 1;
2006                 }
2007
2008                 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell_reg);
2009                 if (arcmsr_hbb_wait_msgint_ready(acb)) {
2010                         printk(KERN_NOTICE "arcmsr%d: cannot start driver mode\n"
2011                                 , acb->host->host_no);
2012                         return 1;
2013                 }
2014                 arcmsr_enable_outbound_ints(acb, intmask_org);
2015                 }
2016                 break;
2017         }
2018         return 0;
2019 }
2020
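/*
** arcmsr_wait_firmware_ready: spin until the firmware reports it is up
** (no timeout), reading outbound_msgaddr1 on type A adapters and the
** iop2drv doorbell on type B.
*/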
2021 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2022 {
2023         uint32_t firmware_state = 0;
2024
2025         switch (acb->adapter_type) {
2026
2027         case ACB_ADAPTER_TYPE_A: {
2028                 struct MessageUnit_A __iomem *reg = acb->pmuA;
2029                 do {
2030                         firmware_state = readl(&reg->outbound_msgaddr1);
2031                 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
2032                 }
2033                 break;
2034
2035         case ACB_ADAPTER_TYPE_B: {
2036                 struct MessageUnit_B *reg = acb->pmuB;
2037                 do {
2038                         firmware_state = readl(reg->iop2drv_doorbell_reg);
2039                 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
2040                 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
2041                 }
2042                 break;
2043         }
2044 }
2045
2046 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2047 {
2048         struct MessageUnit_A __iomem *reg = acb->pmuA;
2049         acb->acb_flags |= ACB_F_MSG_START_BGRB;
2050         writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
2051         if (arcmsr_hba_wait_msgint_ready(acb)) {
2052                 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background"
2053                         " rebuild' timeout\n", acb->host->host_no);
2054         }
2055 }
2056
2057 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2058 {
2059         struct MessageUnit_B *reg = acb->pmuB;
2060         acb->acb_flags |= ACB_F_MSG_START_BGRB;
2061         writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg);
2062         if (arcmsr_hbb_wait_msgint_ready(acb)) {
2063                 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background"
2064                         " rebuild' timeout\n", acb->host->host_no);
2065         }
2066 }
2067
2068 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2069 {
2070         switch (acb->adapter_type) {
2071         case ACB_ADAPTER_TYPE_A:
2072                 arcmsr_start_hba_bgrb(acb);
2073                 break;
2074         case ACB_ADAPTER_TYPE_B:
2075                 arcmsr_start_hbb_bgrb(acb);
2076                 break;
2077         }
2078 }
2079
2080 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
2081 {
2082         switch (acb->adapter_type) {
2083         case ACB_ADAPTER_TYPE_A: {
2084                 struct MessageUnit_A __iomem *reg = acb->pmuA;
2085                 uint32_t outbound_doorbell;
2086                 /* empty doorbell Qbuffer if the doorbell was rung */
2087                 outbound_doorbell = readl(&reg->outbound_doorbell);
2088                 /*clear doorbell interrupt */
2089                 writel(outbound_doorbell, &reg->outbound_doorbell);
2090                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2091                 }
2092                 break;
2093
2094         case ACB_ADAPTER_TYPE_B: {
2095                 struct MessageUnit_B *reg = acb->pmuB;
2096                 /*clear interrupt and message state*/
2097                 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
2098                 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
2099                 /* let IOP know data has been read */
2100                 }
2101                 break;
2102         }
2103 }
2104
2105 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2106 {
2107         switch (acb->adapter_type) {
2108         case ACB_ADAPTER_TYPE_A:
2109                 return;
2110         case ACB_ADAPTER_TYPE_B:
2111                 {
2112                         struct MessageUnit_B *reg = acb->pmuB;
2113                         writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg);
2114                         if (arcmsr_hbb_wait_msgint_ready(acb)) {
2115                                 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT\n");
2116                                 return;
2117                         }
2118                 }
2119                 break;
2120         }
2121         return;
2122 }
2123
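/*
** arcmsr_iop_init: adapter bring-up sequence: mask outbound interrupts,
** wait for firmware, confirm the CCB/queue addresses, read the firmware
** configuration, start background rebuild, drain stale doorbell data,
** enable EOI mode (type B only) and finally re-enable outbound interrupts.
*/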
2124 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2125 {
2126         uint32_t intmask_org;
2127
2128         /* disable all outbound interrupt */
2129         intmask_org = arcmsr_disable_outbound_ints(acb);
2130         arcmsr_wait_firmware_ready(acb);
2131         arcmsr_iop_confirm(acb);
2132         arcmsr_get_firmware_spec(acb);
2133         /*start background rebuild*/
2134         arcmsr_start_adapter_bgrb(acb);
2135         /* empty doorbell Qbuffer if the doorbell was rung */
2136         arcmsr_clear_doorbell_queue_buffer(acb);
2137         arcmsr_enable_eoi_mode(acb);
2138         /* enable outbound Post Queue, outbound doorbell interrupt */
2139         arcmsr_enable_outbound_ints(acb, intmask_org);
2140         acb->acb_flags |= ACB_F_IOP_INITED;
2141 }
2142
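/*
** arcmsr_iop_reset: if commands are still outstanding, ask the IOP to abort
** them all, wait three seconds, drain the posted queues with outbound
** interrupts masked and complete every started CCB as aborted.
*/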
2143 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
2144 {
2145         struct CommandControlBlock *ccb;
2146         uint32_t intmask_org;
2147         int i = 0;
2148
2149         if (atomic_read(&acb->ccboutstandingcount) != 0) {
2150                 /* talk to iop 331 outstanding command aborted */
2151                 arcmsr_abort_allcmd(acb);
2152
2153                 /* wait for 3 sec for all command aborted*/
2154                 ssleep(3);
2155
2156                 /* disable all outbound interrupt */
2157                 intmask_org = arcmsr_disable_outbound_ints(acb);
2158                 /* clear all outbound posted Q */
2159                 arcmsr_done4abort_postqueue(acb);
2160                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2161                         ccb = acb->pccb_pool[i];
2162                         if (ccb->startdone == ARCMSR_CCB_START) {
2163                                 ccb->startdone = ARCMSR_CCB_ABORTED;
2164                                 arcmsr_ccb_complete(ccb, 1);
2165                         }
2166                 }
2167                 /* enable all outbound interrupt */
2168                 arcmsr_enable_outbound_ints(acb, intmask_org);
2169         }
2170 }
2171
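/*
** arcmsr_bus_reset: SCSI error-handler callback.  Polls the interrupt
** handler for up to 10 seconds (400 * 25 ms) waiting for outstanding
** commands to drain, then forces an IOP reset.
*/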
2172 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2173 {
2174         struct AdapterControlBlock *acb =
2175                 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2176         int i;
2177
2178         acb->num_resets++;
2179         acb->acb_flags |= ACB_F_BUS_RESET;
2180         for (i = 0; i < 400; i++) {
2181                 if (!atomic_read(&acb->ccboutstandingcount))
2182                         break;
2183                 arcmsr_interrupt(acb);/* FIXME: need spinlock */
2184                 msleep(25);
2185         }
2186         arcmsr_iop_reset(acb);
2187         acb->acb_flags &= ~ACB_F_BUS_RESET;
2188         return SUCCESS;
2189 }
2190
2191 static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
2192                 struct CommandControlBlock *ccb)
2193 {
2194         u32 intmask;
2195
2196         ccb->startdone = ARCMSR_CCB_ABORTED;
2197
2198         /*
2199         ** Wait for 3 sec for all command done.
2200         */
2201         ssleep(3);
2202
2203         intmask = arcmsr_disable_outbound_ints(acb);
2204         arcmsr_polling_ccbdone(acb, ccb);
2205         arcmsr_enable_outbound_ints(acb, intmask);
2206 }
2207
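/*
** arcmsr_abort: SCSI error-handler callback for a single command.  Finds
** the CCB still started for this scsi_cmnd, marks it aborted and polls the
** done queue for it with outbound interrupts masked.
*/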
2208 static int arcmsr_abort(struct scsi_cmnd *cmd)
2209 {
2210         struct AdapterControlBlock *acb =
2211                 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2212         int i = 0;
2213
2214         printk(KERN_NOTICE
2215                 "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
2216                 acb->host->host_no, cmd->device->id, cmd->device->lun);
2217         acb->num_aborts++;
2218         /*
2219         ************************************************
2220         ** the all interrupt service routine is locked
2221         ** we need to handle it as soon as possible and exit
2222         ************************************************
2223         */
2224         if (!atomic_read(&acb->ccboutstandingcount))
2225                 return SUCCESS;
2226
2227         for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2228                 struct CommandControlBlock *ccb = acb->pccb_pool[i];
2229                 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
2230                         arcmsr_abort_one_cmd(acb, ccb);
2231                         break;
2232                 }
2233         }
2234
2235         return SUCCESS;
2236 }
2237
2238 static const char *arcmsr_info(struct Scsi_Host *host)
2239 {
2240         struct AdapterControlBlock *acb =
2241                 (struct AdapterControlBlock *) host->hostdata;
2242         static char buf[256];
2243         char *type;
2244         int raid6 = 1;
2245
2246         switch (acb->pdev->device) {
2247         case PCI_DEVICE_ID_ARECA_1110:
2248         case PCI_DEVICE_ID_ARECA_1200:
2249         case PCI_DEVICE_ID_ARECA_1202:
2250         case PCI_DEVICE_ID_ARECA_1210:
2251                 raid6 = 0;
2252                 /*FALLTHRU*/
2253         case PCI_DEVICE_ID_ARECA_1120:
2254         case PCI_DEVICE_ID_ARECA_1130:
2255         case PCI_DEVICE_ID_ARECA_1160:
2256         case PCI_DEVICE_ID_ARECA_1170:
2257         case PCI_DEVICE_ID_ARECA_1201:
2258         case PCI_DEVICE_ID_ARECA_1220:
2259         case PCI_DEVICE_ID_ARECA_1230:
2260         case PCI_DEVICE_ID_ARECA_1260:
2261         case PCI_DEVICE_ID_ARECA_1270:
2262         case PCI_DEVICE_ID_ARECA_1280:
2263                 type = "SATA";
2264                 break;
2265         case PCI_DEVICE_ID_ARECA_1380:
2266         case PCI_DEVICE_ID_ARECA_1381:
2267         case PCI_DEVICE_ID_ARECA_1680:
2268         case PCI_DEVICE_ID_ARECA_1681:
2269                 type = "SAS";
2270                 break;
2271         default:
2272                 type = "X-TYPE";
2273                 break;
2274         }
2275         sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
2276                         type, raid6 ? "( RAID6 capable)" : "",
2277                         ARCMSR_DRIVER_VERSION);
2278         return buf;
2279 }
2280 #ifdef CONFIG_SCSI_ARCMSR_AER
2281 static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
2282 {
2283         struct Scsi_Host *host = pci_get_drvdata(pdev);
2284         struct AdapterControlBlock *acb =
2285                 (struct AdapterControlBlock *) host->hostdata;
2286         uint32_t intmask_org;
2287         int i, j;
2288
2289         if (pci_enable_device(pdev)) {
2290                 return PCI_ERS_RESULT_DISCONNECT;
2291         }
2292         pci_set_master(pdev);
2293         intmask_org = arcmsr_disable_outbound_ints(acb);
2294         acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2295                            ACB_F_MESSAGE_RQBUFFER_CLEARED |
2296                            ACB_F_MESSAGE_WQBUFFER_READED);
2297         acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2298         for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
2299                 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
2300                         acb->devstate[i][j] = ARECA_RAID_GONE;
2301
2302         arcmsr_wait_firmware_ready(acb);
2303         arcmsr_iop_confirm(acb);
2304         /* get adapter firmware miscellaneous data */
2305         arcmsr_get_firmware_spec(acb);
2306         /*start background rebuild*/
2307         arcmsr_start_adapter_bgrb(acb);
2308         /* empty doorbell Qbuffer if the doorbell was rung */
2309         arcmsr_clear_doorbell_queue_buffer(acb);
2310         arcmsr_enable_eoi_mode(acb);
2311         /* enable outbound Post Queue, outbound doorbell interrupt */
2312         arcmsr_enable_outbound_ints(acb, intmask_org);
2313         acb->acb_flags |= ACB_F_IOP_INITED;
2314
2315         pci_enable_pcie_error_reporting(pdev);
2316         return PCI_ERS_RESULT_RECOVERED;
2317 }
2318
2319 static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
2320 {
2321         struct Scsi_Host *host = pci_get_drvdata(pdev);
2322         struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
2323         struct CommandControlBlock *ccb;
2324         uint32_t intmask_org;
2325         int i = 0;
2326
2327         if (atomic_read(&acb->ccboutstandingcount) != 0) {
2328                 /* talk to iop 331 outstanding command aborted */
2329                 arcmsr_abort_allcmd(acb);
2330                 /* wait for 3 sec for all command aborted*/
2331                 ssleep(3);
2332                 /* disable all outbound interrupt */
2333                 intmask_org = arcmsr_disable_outbound_ints(acb);
2334                 /* clear all outbound posted Q */
2335                 arcmsr_done4abort_postqueue(acb);
2336                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2337                         ccb = acb->pccb_pool[i];
2338                         if (ccb->startdone == ARCMSR_CCB_START) {
2339                                 ccb->startdone = ARCMSR_CCB_ABORTED;
2340                                 arcmsr_ccb_complete(ccb, 1);
2341                         }
2342                 }
2343                 /* enable all outbound interrupt */
2344                 arcmsr_enable_outbound_ints(acb, intmask_org);
2345         }
2346         pci_disable_device(pdev);
2347 }
2348
2349 static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
2350 {
2351         struct Scsi_Host *host = pci_get_drvdata(pdev);
2352         struct AdapterControlBlock *acb =
2353                 (struct AdapterControlBlock *)host->hostdata;
2354
2355         arcmsr_stop_adapter_bgrb(acb);
2356         arcmsr_flush_adapter_cache(acb);
2357 }
2358
2359 static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
2360                                                 pci_channel_state_t state)
2361 {
2362         switch (state) {
2363         case pci_channel_io_frozen:
2364                 arcmsr_pci_ers_need_reset_forepart(pdev);
2365                 return PCI_ERS_RESULT_NEED_RESET;
2366         case pci_channel_io_perm_failure:
2367                 arcmsr_pci_ers_disconnect_forepart(pdev);
2368                 return PCI_ERS_RESULT_DISCONNECT;
2369                 break;
2370         default:
2371                 return PCI_ERS_RESULT_NEED_RESET;
2372         }
2373 }
2374 #endif