hptiop: add RocketRAID 4xxx support and split ITL/MV adapter operations
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 8515054..4f05565 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,6 +1,6 @@
 /*
- * HighPoint RR3xxx controller driver for Linux
- * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
+ * HighPoint RR3xxx/4xxx controller driver for Linux
+ * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/spinlock.h>
-#include <linux/hdreg.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/div64.h>
 #include "hptiop.h"
 
 MODULE_AUTHOR("HighPoint Technologies, Inc.");
-MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");
+MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
 
 static char driver_name[] = "hptiop";
-static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
-static const char driver_ver[] = "v1.2 (070830)";
-
-static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
-static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
+static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
+static const char driver_ver[] = "v1.6 (090910)";
+
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+                               struct hpt_iop_request_scsi_command *req);
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
+static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
 
-static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
-{
-       readl(&iop->outbound_intstatus);
-}
-
-static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
+static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
 {
        u32 req = 0;
        int i;
 
        for (i = 0; i < millisec; i++) {
-               req = readl(&iop->inbound_queue);
+               req = readl(&hba->u.itl.iop->inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                msleep(1);
        }
 
        if (req != IOPMU_QUEUE_EMPTY) {
-               writel(req, &iop->outbound_queue);
-               hptiop_pci_posting_flush(iop);
+               writel(req, &hba->u.itl.iop->outbound_queue);
+               readl(&hba->u.itl.iop->outbound_intstatus);
                return 0;
        }
 
        return -1;
 }
 
-static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
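+/* The MV message unit has no inbound request queue to poll, so firmware
+ * readiness is probed by round-tripping a NOP message instead. */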
+static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
+{
+       return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
+}
+
+static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
 {
        if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
-               return hptiop_host_request_callback(hba,
+               hptiop_host_request_callback_itl(hba,
                                tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
        else
-               return hptiop_iop_request_callback(hba, tag);
+               hptiop_iop_request_callback_itl(hba, tag);
 }
 
-static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
+static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
 {
        u32 req;
 
-       while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {
+       while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
+                                               IOPMU_QUEUE_EMPTY) {
 
                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
-                       hptiop_request_callback(hba, req);
+                       hptiop_request_callback_itl(hba, req);
                else {
                        struct hpt_iop_request_header __iomem * p;
 
                        p = (struct hpt_iop_request_header __iomem *)
-                               ((char __iomem *)hba->iop + req);
+                               ((char __iomem *)hba->u.itl.iop + req);
 
                        if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                if (readl(&p->context))
-                                       hptiop_request_callback(hba, req);
+                                       hptiop_request_callback_itl(hba, req);
                                else
                                        writel(1, &p->context);
                        }
                        else
-                               hptiop_request_callback(hba, req);
+                               hptiop_request_callback_itl(hba, req);
                }
        }
 }
 
-static int __iop_intr(struct hptiop_hba *hba)
+static int iop_intr_itl(struct hptiop_hba *hba)
 {
-       struct hpt_iopmu __iomem *iop = hba->iop;
+       struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
+       void __iomem *plx = hba->u.itl.plx;
        u32 status;
        int ret = 0;
 
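+       /* RR44xx boards put the IOP behind a PLX bridge; if any of the
+        * low four bits at plx+0x11C5C are set, write 1 to plx+0x11C60,
+        * presumably to acknowledge the bridge's interrupt latch (the
+        * offsets are not otherwise documented here). */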
+       if (plx && readl(plx + 0x11C5C) & 0xf)
+               writel(1, plx + 0x11C60);
+
        status = readl(&iop->outbound_intstatus);
 
        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u32 msg = readl(&iop->outbound_msgaddr0);
+
                dprintk("received outbound msg %x\n", msg);
                writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
                hptiop_message_callback(hba, msg);
@@ -126,31 +134,115 @@ static int __iop_intr(struct hptiop_hba *hba)
        }
 
        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
-               hptiop_drain_outbound_queue(hba);
+               hptiop_drain_outbound_queue_itl(hba);
+               ret = 1;
+       }
+
+       return ret;
+}
+
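+/* The MV outbound queue is a ring of 64-bit tags indexed by head/tail
+ * registers: pop one entry at the tail, advance it, and write it back. */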
+static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
+{
+       u32 outbound_tail = readl(&mu->outbound_tail);
+       u32 outbound_head = readl(&mu->outbound_head);
+
+       if (outbound_tail != outbound_head) {
+               u64 p;
+
+               memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
+               outbound_tail++;
+
+               if (outbound_tail == MVIOP_QUEUE_LEN)
+                       outbound_tail = 0;
+               writel(outbound_tail, &mu->outbound_tail);
+               return p;
+       } else
+               return 0;
+}
+
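+/* Push one 64-bit tag at the inbound head, advance the head, then ring
+ * the doorbell so the IOP picks up the new entry. */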
+static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
+{
+       u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
+       u32 head = inbound_head + 1;
+
+       if (head == MVIOP_QUEUE_LEN)
+               head = 0;
+
+       memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
+       writel(head, &hba->u.mv.mu->inbound_head);
+       writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
+                       &hba->u.mv.regs->inbound_doorbell);
+}
+
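+/* MV completion tags encode the request type in bits 5-7 and, for SCSI
+ * commands, the request index in bits 8 and up; the RESULT bit carries
+ * the firmware's success indication. */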
+static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
+{
+       u32 req_type = (tag >> 5) & 0x7;
+       struct hpt_iop_request_scsi_command *req;
+
+       dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);
+
+       BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);
+
+       switch (req_type) {
+       case IOP_REQUEST_TYPE_GET_CONFIG:
+       case IOP_REQUEST_TYPE_SET_CONFIG:
+               hba->msg_done = 1;
+               break;
+
+       case IOP_REQUEST_TYPE_SCSI_COMMAND:
+               req = hba->reqs[tag >> 8].req_virt;
+               if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
+                       req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+
+               hptiop_finish_scsi_req(hba, tag>>8, req);
+               break;
+
+       default:
+               break;
+       }
+}
+
+static int iop_intr_mv(struct hptiop_hba *hba)
+{
+       u32 status;
+       int ret = 0;
+
+       status = readl(&hba->u.mv.regs->outbound_doorbell);
+       writel(~status, &hba->u.mv.regs->outbound_doorbell);
+
+       if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
+               u32 msg;
+               msg = readl(&hba->u.mv.mu->outbound_msg);
+               dprintk("received outbound msg %x\n", msg);
+               hptiop_message_callback(hba, msg);
+               ret = 1;
+       }
+
+       if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
+               u64 tag;
+
+               while ((tag = mv_outbound_read(hba->u.mv.mu)))
+                       hptiop_request_callback_mv(hba, tag);
                ret = 1;
        }
 
        return ret;
 }
 
-static int iop_send_sync_request(struct hptiop_hba *hba,
+static int iop_send_sync_request_itl(struct hptiop_hba *hba,
                                        void __iomem *_req, u32 millisec)
 {
        struct hpt_iop_request_header __iomem *req = _req;
        u32 i;
 
-       writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
-                       &req->flags);
-
+       writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
        writel(0, &req->context);
-
-       writel((unsigned long)req - (unsigned long)hba->iop,
-                       &hba->iop->inbound_queue);
-
-       hptiop_pci_posting_flush(hba->iop);
+       writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
+                       &hba->u.itl.iop->inbound_queue);
+       readl(&hba->u.itl.iop->outbound_intstatus);
 
        for (i = 0; i < millisec; i++) {
-               __iop_intr(hba);
+               iop_intr_itl(hba);
                if (readl(&req->context))
                        return 0;
                msleep(1);
@@ -159,19 +251,49 @@ static int iop_send_sync_request(struct hptiop_hba *hba,
        return -1;
 }
 
-static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+static int iop_send_sync_request_mv(struct hptiop_hba *hba,
+                                       u32 size_bits, u32 millisec)
 {
+       struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
        u32 i;
 
        hba->msg_done = 0;
+       reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
+       mv_inbound_write(hba->u.mv.internal_req_phy |
+                       MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);
 
-       writel(msg, &hba->iop->inbound_msgaddr0);
+       for (i = 0; i < millisec; i++) {
+               iop_intr_mv(hba);
+               if (hba->msg_done)
+                       return 0;
+               msleep(1);
+       }
+       return -1;
+}
 
-       hptiop_pci_posting_flush(hba->iop);
+static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
+{
+       writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
+       readl(&hba->u.itl.iop->outbound_intstatus);
+}
+
+static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
+{
+       writel(msg, &hba->u.mv.mu->inbound_msg);
+       writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
+       readl(&hba->u.mv.regs->inbound_doorbell);
+}
+
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+{
+       u32 i;
+
+       hba->msg_done = 0;
+       hba->ops->post_msg(hba, msg);
 
        for (i = 0; i < millisec; i++) {
                spin_lock_irq(hba->host->host_lock);
-               __iop_intr(hba);
+               hba->ops->iop_intr(hba);
                spin_unlock_irq(hba->host->host_lock);
                if (hba->msg_done)
                        break;
@@ -181,46 +303,68 @@ static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
        return hba->msg_done? 0 : -1;
 }
 
-static int iop_get_config(struct hptiop_hba *hba,
+static int iop_get_config_itl(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
 {
        u32 req32;
        struct hpt_iop_request_get_config __iomem *req;
 
-       req32 = readl(&hba->iop->inbound_queue);
+       req32 = readl(&hba->u.itl.iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;
 
        req = (struct hpt_iop_request_get_config __iomem *)
-                       ((unsigned long)hba->iop + req32);
+                       ((unsigned long)hba->u.itl.iop + req32);
 
        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);
 
-       if (iop_send_sync_request(hba, req, 20000)) {
+       if (iop_send_sync_request_itl(hba, req, 20000)) {
                dprintk("Get config send cmd failed\n");
                return -1;
        }
 
        memcpy_fromio(config, req, sizeof(*config));
-       writel(req32, &hba->iop->outbound_queue);
+       writel(req32, &hba->u.itl.iop->outbound_queue);
        return 0;
 }
 
-static int iop_set_config(struct hptiop_hba *hba,
+static int iop_get_config_mv(struct hptiop_hba *hba,
+                               struct hpt_iop_request_get_config *config)
+{
+       struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;
+
+       req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+       req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
+       req->header.size =
+               cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
+       req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+       req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
+       req->header.context_hi32 = 0;
+
+       if (iop_send_sync_request_mv(hba, 0, 20000)) {
+               dprintk("Get config send cmd failed\n");
+               return -1;
+       }
+
+       memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
+       return 0;
+}
+
+static int iop_set_config_itl(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
 {
        u32 req32;
        struct hpt_iop_request_set_config __iomem *req;
 
-       req32 = readl(&hba->iop->inbound_queue);
+       req32 = readl(&hba->u.itl.iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;
 
        req = (struct hpt_iop_request_set_config __iomem *)
-                       ((unsigned long)hba->iop + req32);
+                       ((unsigned long)hba->u.itl.iop + req32);
 
        memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
                (u8 *)config + sizeof(struct hpt_iop_request_header),
@@ -232,22 +376,53 @@ static int iop_set_config(struct hptiop_hba *hba,
        writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);
 
-       if (iop_send_sync_request(hba, req, 20000)) {
+       if (iop_send_sync_request_itl(hba, req, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }
 
-       writel(req32, &hba->iop->outbound_queue);
+       writel(req32, &hba->u.itl.iop->outbound_queue);
        return 0;
 }
 
-static int hptiop_initialize_iop(struct hptiop_hba *hba)
+static int iop_set_config_mv(struct hptiop_hba *hba,
+                               struct hpt_iop_request_set_config *config)
 {
-       struct hpt_iopmu __iomem *iop = hba->iop;
+       struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;
 
-       /* enable interrupts */
+       memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
+       req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+       req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
+       req->header.size =
+               cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
+       req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+       req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+       req->header.context_hi32 = 0;
+
+       if (iop_send_sync_request_mv(hba, 0, 20000)) {
+               dprintk("Set config send cmd failed\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
+{
        writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
-                       &iop->outbound_intmask);
+               &hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
+{
+       writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
+               &hba->u.mv.regs->outbound_intmask);
+}
+
+static int hptiop_initialize_iop(struct hptiop_hba *hba)
+{
+       /* enable interrupts */
+       hba->ops->enable_intr(hba);
 
        hba->initialized = 1;
 
@@ -261,37 +436,84 @@ static int hptiop_initialize_iop(struct hptiop_hba *hba)
        return 0;
 }
 
-static int hptiop_map_pci_bar(struct hptiop_hba *hba)
+static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
 {
        u32 mem_base_phy, length;
        void __iomem *mem_base_virt;
+
        struct pci_dev *pcidev = hba->pcidev;
 
-       if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
+
+       if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
                printk(KERN_ERR "scsi%d: pci resource invalid\n",
                                hba->host->host_no);
-               return -1;
+               return NULL;
        }
 
-       mem_base_phy = pci_resource_start(pcidev, 0);
-       length = pci_resource_len(pcidev, 0);
+       mem_base_phy = pci_resource_start(pcidev, index);
+       length = pci_resource_len(pcidev, index);
        mem_base_virt = ioremap(mem_base_phy, length);
 
        if (!mem_base_virt) {
                printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
                                hba->host->host_no);
+               return NULL;
+       }
+       return mem_base_virt;
+}
+
+static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
+{
+       struct pci_dev *pcidev = hba->pcidev;
+       hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
+       if (hba->u.itl.iop == NULL)
+               return -1;
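+       /* RR44xx (device ID 0x44xx): BAR0 maps the PLX bridge and the
+        * IOP registers live behind BAR2 instead. */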
+       if ((pcidev->device & 0xff00) == 0x4400) {
+               hba->u.itl.plx = hba->u.itl.iop;
+               hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
+               if (hba->u.itl.iop == NULL) {
+                       iounmap(hba->u.itl.plx);
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
+{
+       if (hba->u.itl.plx)
+               iounmap(hba->u.itl.plx);
+       iounmap(hba->u.itl.iop);
+}
+
+static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
+{
+       hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
+       if (hba->u.mv.regs == NULL)
+               return -1;
+
+       hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
+       if (hba->u.mv.mu == NULL) {
+               iounmap(hba->u.mv.regs);
                return -1;
        }
 
-       hba->iop = mem_base_virt;
-       dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
        return 0;
 }
 
+static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
+{
+       iounmap(hba->u.mv.regs);
+       iounmap(hba->u.mv.mu);
+}
+
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
 {
        dprintk("iop message 0x%x\n", msg);
 
+       if (msg == IOPMU_INBOUND_MSG0_NOP)
+               hba->msg_done = 1;
+
        if (!hba->initialized)
                return;
 
@@ -303,7 +525,7 @@ static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
                hba->msg_done = 1;
 }
 
-static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
+static struct hptiop_request *get_req(struct hptiop_hba *hba)
 {
        struct hptiop_request *ret;
 
@@ -316,30 +538,19 @@ static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
        return ret;
 }
 
-static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
+static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
 {
        dprintk("free_req(%d, %p)\n", req->index, req);
        req->next = hba->req_list;
        hba->req_list = req;
 }
 
-static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+                               struct hpt_iop_request_scsi_command *req)
 {
-       struct hpt_iop_request_scsi_command *req;
        struct scsi_cmnd *scp;
-       u32 tag;
 
-       if (hba->iopintf_v2) {
-               tag = _tag & ~ IOPMU_QUEUE_REQUEST_RESULT_BIT;
-               req = hba->reqs[tag].req_virt;
-               if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
-                       req->header.result = IOP_RESULT_SUCCESS;
-       } else {
-               tag = _tag;
-               req = hba->reqs[tag].req_virt;
-       }
-
-       dprintk("hptiop_host_request_callback: req=%p, type=%d, "
+       dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, req->header.type, req->header.result,
                        req->header.context, tag);
@@ -354,6 +565,8 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
 
        switch (le32_to_cpu(req->header.result)) {
        case IOP_RESULT_SUCCESS:
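+               /* report any underrun to the midlayer */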
+               scsi_set_resid(scp,
+                       scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
                scp->result = (DID_OK<<16);
                break;
        case IOP_RESULT_BAD_TARGET:
@@ -371,17 +584,17 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
        case IOP_RESULT_INVALID_REQUEST:
                scp->result = (DID_ABORT<<16);
                break;
-       case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
+       case IOP_RESULT_CHECK_CONDITION:
+               scsi_set_resid(scp,
+                       scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
                scp->result = SAM_STAT_CHECK_CONDITION;
-               memset(&scp->sense_buffer,
-                               0, sizeof(scp->sense_buffer));
-               memcpy(&scp->sense_buffer,
-                       &req->sg_list, le32_to_cpu(req->dataxfer_length));
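+               /* clamp the copy to the midlayer's sense buffer size */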
+               memcpy(scp->sense_buffer, &req->sg_list,
+                               min_t(size_t, SCSI_SENSE_BUFFERSIZE,
+                                       le32_to_cpu(req->dataxfer_length)));
                break;
 
        default:
-               scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
-                                       (DID_ABORT<<16);
+               scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
                break;
        }
 
@@ -390,15 +603,33 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
        free_req(hba, &hba->reqs[tag]);
 }
 
-void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
+{
+       struct hpt_iop_request_scsi_command *req;
+       u32 tag;
+
+       if (hba->iopintf_v2) {
+               tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
+               req = hba->reqs[tag].req_virt;
+               if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
+                       req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+       } else {
+               tag = _tag;
+               req = hba->reqs[tag].req_virt;
+       }
+
+       hptiop_finish_scsi_req(hba, tag, req);
+}
+
+void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
 {
        struct hpt_iop_request_header __iomem *req;
        struct hpt_iop_request_ioctl_command __iomem *p;
        struct hpt_ioctl_k *arg;
 
        req = (struct hpt_iop_request_header __iomem *)
-                       ((unsigned long)hba->iop + tag);
-       dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
+                       ((unsigned long)hba->u.itl.iop + tag);
+       dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, readl(&req->type), readl(&req->result),
                        readl(&req->context), tag);
@@ -426,7 +657,7 @@ void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
                arg->result = HPT_IOCTL_RESULT_FAILED;
 
        arg->done(arg);
-       writel(tag, &hba->iop->outbound_queue);
+       writel(tag, &hba->u.itl.iop->outbound_queue);
 }
 
 static irqreturn_t hptiop_intr(int irq, void *dev_id)
@@ -436,7 +667,7 @@ static irqreturn_t hptiop_intr(int irq, void *dev_id)
        unsigned long flags;
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       handled = __iop_intr(hba);
+       handled = hba->ops->iop_intr(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        return handled;
@@ -468,6 +699,57 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
        return HPT_SCP(scp)->sgcnt;
 }
 
+static void hptiop_post_req_itl(struct hptiop_hba *hba,
+                                       struct hptiop_request *_req)
+{
+       struct hpt_iop_request_header *reqhdr = _req->req_virt;
+
+       reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
+                                                       (u32)_req->index);
+       reqhdr->context_hi32 = 0;
+
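+       /* The v2 ITL interface takes a request-size hint encoded in the
+        * low bits of the posted address: three buckets for requests
+        * under 256 bytes, under 512, and larger. */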
+       if (hba->iopintf_v2) {
+               u32 size, size_bits;
+
+               size = le32_to_cpu(reqhdr->size);
+               if (size < 256)
+                       size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
+               else if (size < 512)
+                       size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
+               else
+                       size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
+                                               IOPMU_QUEUE_ADDR_HOST_BIT;
+               writel(_req->req_shifted_phy | size_bits,
+                       &hba->u.itl.iop->inbound_queue);
+       } else
+               writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
+                                       &hba->u.itl.iop->inbound_queue);
+}
+
+static void hptiop_post_req_mv(struct hptiop_hba *hba,
+                                       struct hptiop_request *_req)
+{
+       struct hpt_iop_request_header *reqhdr = _req->req_virt;
+       u32 size, size_bit;
+
+       reqhdr->context = cpu_to_le32(_req->index<<8 |
+                                       IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
+       reqhdr->context_hi32 = 0;
+       size = le32_to_cpu(reqhdr->size);
+
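+       /* MV encodes the request size in 256-byte units as a two-bit
+        * field posted alongside the shifted physical address. */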
+       if (size <= 256)
+               size_bit = 0;
+       else if (size <= 256*2)
+               size_bit = 1;
+       else if (size <= 256*3)
+               size_bit = 2;
+       else
+               size_bit = 3;
+
+       mv_inbound_write((_req->req_shifted_phy << 5) |
+               MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
+}
+
 static int hptiop_queuecommand(struct scsi_cmnd *scp,
                                void (*done)(struct scsi_cmnd *))
 {
@@ -493,9 +775,9 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
                        scp,
                        host->host_no, scp->device->channel,
                        scp->device->id, scp->device->lun,
-                       *((u32 *)&scp->cmnd),
-                       *((u32 *)&scp->cmnd + 1),
-                       *((u32 *)&scp->cmnd + 2),
+                       ((u32 *)scp->cmnd)[0],
+                       ((u32 *)scp->cmnd)[1],
+                       ((u32 *)scp->cmnd)[2],
                        _req->index, _req->req_virt);
 
        scp->result = 0;
@@ -517,9 +799,6 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
-       req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
-                                                       (u32)_req->index);
-       req->header.context_hi32 = 0;
        req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
        req->channel = scp->device->channel;
        req->target = scp->device->id;
@@ -530,21 +809,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
                                 + sg_count * sizeof(struct hpt_iopsg));
 
        memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
-
-       if (hba->iopintf_v2) {
-               u32 size_bits;
-               if (req->header.size < 256)
-                       size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
-               else if (req->header.size < 512)
-                       size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
-               else
-                       size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
-                                               IOPMU_QUEUE_ADDR_HOST_BIT;
-               writel(_req->req_shifted_phy | size_bits, &hba->iop->inbound_queue);
-       } else
-               writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
-                                       &hba->iop->inbound_queue);
-
+       hba->ops->post_req(hba, _req);
        return 0;
 
 cmd_done:
@@ -562,16 +827,14 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
 {
        if (atomic_xchg(&hba->resetting, 1) == 0) {
                atomic_inc(&hba->reset_count);
-               writel(IOPMU_INBOUND_MSG0_RESET,
-                               &hba->iop->inbound_msgaddr0);
-               hptiop_pci_posting_flush(hba->iop);
+               hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
        }
 
        wait_event_timeout(hba->reset_wq,
                        atomic_read(&hba->resetting) == 0, 60 * HZ);
 
        if (atomic_read(&hba->resetting)) {
-               /* IOP is in unkown state, abort reset */
+               /* IOP is in unknown state, abort reset */
                printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
                return -1;
        }
@@ -598,22 +861,29 @@ static int hptiop_reset(struct scsi_cmnd *scp)
 }
 
 static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
-                                               int queue_depth)
+                                         int queue_depth, int reason)
 {
-       if(queue_depth > 256)
-               queue_depth = 256;
+       struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
+
+       if (reason != SCSI_QDEPTH_DEFAULT)
+               return -EOPNOTSUPP;
+
+       if (queue_depth > hba->max_requests)
+               queue_depth = hba->max_requests;
        scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
        return queue_depth;
 }
 
-static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
+static ssize_t hptiop_show_version(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
 {
        return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
 }
 
-static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
+static ssize_t hptiop_show_fw_version(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
 {
-       struct Scsi_Host *host = class_to_shost(class_dev);
+       struct Scsi_Host *host = class_to_shost(dev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
 
        return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
@@ -623,7 +893,7 @@ static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
                                hba->firmware_version & 0xff);
 }
 
-static struct class_device_attribute hptiop_attr_version = {
+static struct device_attribute hptiop_attr_version = {
        .attr = {
                .name = "driver-version",
                .mode = S_IRUGO,
@@ -631,7 +901,7 @@ static struct class_device_attribute hptiop_attr_version = {
        .show = hptiop_show_version,
 };
 
-static struct class_device_attribute hptiop_attr_fw_version = {
+static struct device_attribute hptiop_attr_fw_version = {
        .attr = {
                .name = "firmware-version",
                .mode = S_IRUGO,
@@ -639,7 +909,7 @@ static struct class_device_attribute hptiop_attr_fw_version = {
        .show = hptiop_show_fw_version,
 };
 
-static struct class_device_attribute *hptiop_attrs[] = {
+static struct device_attribute *hptiop_attrs[] = {
        &hptiop_attr_version,
        &hptiop_attr_fw_version,
        NULL
@@ -652,16 +922,34 @@ static struct scsi_host_template driver_template = {
        .eh_device_reset_handler    = hptiop_reset,
        .eh_bus_reset_handler       = hptiop_reset,
        .info                       = hptiop_info,
-       .unchecked_isa_dma          = 0,
        .emulated                   = 0,
        .use_clustering             = ENABLE_CLUSTERING,
-       .use_sg_chaining            = ENABLE_SG_CHAINING,
        .proc_name                  = driver_name,
        .shost_attrs                = hptiop_attrs,
        .this_id                    = -1,
        .change_queue_depth         = hptiop_adjust_disk_queue_depth,
 };
 
+static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
+{
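+       /* A single 2 KiB coherent buffer, reused for the synchronous
+        * internal requests (get_config/set_config) posted through
+        * iop_send_sync_request_mv(). */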
+       hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
+                       0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
+       if (hba->u.mv.internal_req)
+               return 0;
+       else
+               return -1;
+}
+
+static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
+{
+       if (hba->u.mv.internal_req) {
+               dma_free_coherent(&hba->pcidev->dev, 0x800,
+                       hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
+               return 0;
+       } else
+               return -1;
+}
+
 static int __devinit hptiop_probe(struct pci_dev *pcidev,
                                        const struct pci_device_id *id)
 {
@@ -687,8 +975,8 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
        pci_set_master(pcidev);
 
        /* Enable 64bit DMA if possible */
-       if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
-               if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
+       if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+               if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
                        printk(KERN_ERR "hptiop: fail to set dma_mask\n");
                        goto disable_pci_device;
                }
@@ -707,6 +995,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 
        hba = (struct hptiop_hba *)host->hostdata;
 
+       hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
        hba->pcidev = pcidev;
        hba->host = host;
        hba->initialized = 0;
@@ -724,16 +1013,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
        host->n_io_port = 0;
        host->irq = pcidev->irq;
 
-       if (hptiop_map_pci_bar(hba))
+       if (hba->ops->map_pci_bar(hba))
                goto free_scsi_host;
 
-       if (iop_wait_ready(hba->iop, 20000)) {
+       if (hba->ops->iop_wait_ready(hba, 20000)) {
                printk(KERN_ERR "scsi%d: firmware not ready\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }
 
-       if (iop_get_config(hba, &iop_config)) {
+       if (hba->ops->internal_memalloc) {
+               if (hba->ops->internal_memalloc(hba)) {
+                       printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
+                               hba->host->host_no);
+                       goto unmap_pci_bar;
+               }
+       }
+
+       if (hba->ops->get_config(hba, &iop_config)) {
                printk(KERN_ERR "scsi%d: get config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
@@ -769,7 +1066,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
        set_config.vbus_id = cpu_to_le16(host->host_no);
        set_config.max_host_request_size = cpu_to_le16(req_size);
 
-       if (iop_set_config(hba, &set_config)) {
+       if (hba->ops->set_config(hba, &set_config)) {
                printk(KERN_ERR "scsi%d: set config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
@@ -838,21 +1135,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 
 free_request_mem:
        dma_free_coherent(&hba->pcidev->dev,
-                       hba->req_size*hba->max_requests + 0x20,
+                       hba->req_size * hba->max_requests + 0x20,
                        hba->dma_coherent, hba->dma_coherent_handle);
 
 free_request_irq:
        free_irq(hba->pcidev->irq, hba);
 
 unmap_pci_bar:
-       iounmap(hba->iop);
+       if (hba->ops->internal_memfree)
+               hba->ops->internal_memfree(hba);
 
-free_pci_regions:
-       pci_release_regions(pcidev) ;
+       hba->ops->unmap_pci_bar(hba);
 
 free_scsi_host:
        scsi_host_put(host);
 
+free_pci_regions:
+       pci_release_regions(pcidev);
+
 disable_pci_device:
        pci_disable_device(pcidev);
 
@@ -864,8 +1164,6 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
 {
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
-       struct hpt_iopmu __iomem *iop = hba->iop;
-       u32    int_mask;
 
        dprintk("hptiop_shutdown(%p)\n", hba);
 
@@ -875,11 +1173,24 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
                                        hba->host->host_no);
 
        /* disable all outbound interrupts */
-       int_mask = readl(&iop->outbound_intmask);
+       hba->ops->disable_intr(hba);
+}
+
+static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
+{
+       u32 int_mask;
+
+       int_mask = readl(&hba->u.itl.iop->outbound_intmask);
        writel(int_mask |
                IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
-               &iop->outbound_intmask);
-       hptiop_pci_posting_flush(iop);
+               &hba->u.itl.iop->outbound_intmask);
+       readl(&hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
+{
+       writel(0, &hba->u.mv.regs->outbound_intmask);
+       readl(&hba->u.mv.regs->outbound_intmask);
 }
 
 static void hptiop_remove(struct pci_dev *pcidev)
@@ -900,7 +1211,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
                        hba->dma_coherent,
                        hba->dma_coherent_handle);
 
-       iounmap(hba->iop);
+       if (hba->ops->internal_memfree)
+               hba->ops->internal_memfree(hba);
+
+       hba->ops->unmap_pci_bar(hba);
 
        pci_release_regions(hba->pcidev);
        pci_set_drvdata(hba->pcidev, NULL);
@@ -909,11 +1223,59 @@ static void hptiop_remove(struct pci_dev *pcidev)
        scsi_host_put(host);
 }
 
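+/* Everything that differs between the ITL and MV controller families is
+ * dispatched through one of these two ops tables. */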
+static struct hptiop_adapter_ops hptiop_itl_ops = {
+       .iop_wait_ready    = iop_wait_ready_itl,
+       .internal_memalloc = NULL,
+       .internal_memfree  = NULL,
+       .map_pci_bar       = hptiop_map_pci_bar_itl,
+       .unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
+       .enable_intr       = hptiop_enable_intr_itl,
+       .disable_intr      = hptiop_disable_intr_itl,
+       .get_config        = iop_get_config_itl,
+       .set_config        = iop_set_config_itl,
+       .iop_intr          = iop_intr_itl,
+       .post_msg          = hptiop_post_msg_itl,
+       .post_req          = hptiop_post_req_itl,
+};
+
+static struct hptiop_adapter_ops hptiop_mv_ops = {
+       .iop_wait_ready    = iop_wait_ready_mv,
+       .internal_memalloc = hptiop_internal_memalloc_mv,
+       .internal_memfree  = hptiop_internal_memfree_mv,
+       .map_pci_bar       = hptiop_map_pci_bar_mv,
+       .unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
+       .enable_intr       = hptiop_enable_intr_mv,
+       .disable_intr      = hptiop_disable_intr_mv,
+       .get_config        = iop_get_config_mv,
+       .set_config        = iop_set_config_mv,
+       .iop_intr          = iop_intr_mv,
+       .post_msg          = hptiop_post_msg_mv,
+       .post_req          = hptiop_post_req_mv,
+};
+
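+/* driver_data carries the matching ops table; hptiop_probe() casts it
+ * back into hba->ops. */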
 static struct pci_device_id hptiop_id_table[] = {
-       { PCI_VDEVICE(TTI, 0x3220) },
-       { PCI_VDEVICE(TTI, 0x3320) },
-       { PCI_VDEVICE(TTI, 0x3520) },
-       { PCI_VDEVICE(TTI, 0x4320) },
+       { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
+       { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
+       { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
        {},
 };