of: Always use 'struct device.of_node' to get device node pointer.
[safe/jmp/linux-2.6] / drivers / scsi / ibmvscsi / ibmvscsi.c
index f5da762..cc38fef 100644 (file)
@@ -40,7 +40,7 @@
  * (CRQ), which is just a buffer of 16 byte entries in the receiver's 
  * Senders cannot access the buffer directly, but send messages by
  * making a hypervisor call and passing in the 16 bytes.  The hypervisor
- * puts the message in the next 16 byte space in round-robbin fashion,
+ * puts the message in the next 16 byte space in round-robin fashion,
  * turns on the high order bit of the message (the valid bit), and 
  * generates an interrupt to the receiver (if interrupts are turned on.) 
  * The receiver just turns off the valid bit when they have copied out
 #include <linux/moduleparam.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <asm/firmware.h>
 #include <asm/vio.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_srp.h>
 #include "ibmvscsi.h"
 
 /* The values below are somewhat arbitrary default values, but 
  */
 static int max_id = 64;
 static int max_channel = 3;
-static int init_timeout = 5;
+static int init_timeout = 300;
+static int login_timeout = 60;
+static int info_timeout = 30;
+static int abort_timeout = 60;
+static int reset_timeout = 60;
 static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
+static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
+static int fast_fail = 1;
+static int client_reserve = 1;
+
+static struct scsi_transport_template *ibmvscsi_transport_template;
 
 #define IBMVSCSI_VERSION "1.5.8"
 
+static struct ibmvscsi_ops *ibmvscsi_ops;
+
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
 MODULE_LICENSE("GPL");
@@ -100,8 +116,12 @@ module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_channel, "Largest channel value");
 module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
-module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
+module_param_named(max_requests, max_requests, int, S_IRUGO);
 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
+module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
+module_param_named(client_reserve, client_reserve, int, S_IRUGO );
+MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
 
 /* ------------------------------------------------------------
  * Routines for the event pool and event structs
@@ -350,20 +370,19 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
        }
 }
 
-static int map_sg_list(int num_entries, 
-                      struct scatterlist *sg,
+static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
                       struct srp_direct_buf *md)
 {
        int i;
+       struct scatterlist *sg;
        u64 total_length = 0;
 
-       for (i = 0; i < num_entries; ++i) {
+       scsi_for_each_sg(cmd, sg, nseg, i) {
                struct srp_direct_buf *descr = md + i;
-               struct scatterlist *sg_entry = &sg[i];
-               descr->va = sg_dma_address(sg_entry);
-               descr->len = sg_dma_len(sg_entry);
+               descr->va = sg_dma_address(sg);
+               descr->len = sg_dma_len(sg);
                descr->key = 0;
-               total_length += sg_dma_len(sg_entry);
+               total_length += sg_dma_len(sg);
        }
        return total_length;
 }
@@ -384,24 +403,22 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
        int sg_mapped;
        u64 total_length = 0;
-       struct scatterlist *sg = cmd->request_buffer;
        struct srp_direct_buf *data =
                (struct srp_direct_buf *) srp_cmd->add_data;
        struct srp_indirect_buf *indirect =
                (struct srp_indirect_buf *) data;
 
-       sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
-
-       if (sg_mapped == 0)
+       sg_mapped = scsi_dma_map(cmd);
+       if (!sg_mapped)
+               return 1;
+       else if (sg_mapped < 0)
                return 0;
 
        set_srp_direction(cmd, srp_cmd, sg_mapped);
 
        /* special case; we can use a single direct descriptor */
        if (sg_mapped == 1) {
-               data->va = sg_dma_address(&sg[0]);
-               data->len = sg_dma_len(&sg[0]);
-               data->key = 0;
+               map_sg_list(cmd, sg_mapped, data);
                return 1;
        }
 
@@ -410,7 +427,7 @@ static int map_sg_data(struct scsi_cmnd *cmd,
        indirect->table_desc.key = 0;
 
        if (sg_mapped <= MAX_INDIRECT_BUFS) {
-               total_length = map_sg_list(sg_mapped, sg,
+               total_length = map_sg_list(cmd, sg_mapped,
                                           &indirect->desc_list[0]);
                indirect->len = total_length;
                return 1;
@@ -419,60 +436,30 @@ static int map_sg_data(struct scsi_cmnd *cmd,
        /* get indirect table */
        if (!evt_struct->ext_list) {
                evt_struct->ext_list = (struct srp_direct_buf *)
-                       dma_alloc_coherent(dev, 
+                       dma_alloc_coherent(dev,
                                           SG_ALL * sizeof(struct srp_direct_buf),
                                           &evt_struct->ext_list_token, 0);
                if (!evt_struct->ext_list) {
-                       sdev_printk(KERN_ERR, cmd->device,
-                                   "Can't allocate memory for indirect table\n");
+                       if (!firmware_has_feature(FW_FEATURE_CMO))
+                               sdev_printk(KERN_ERR, cmd->device,
+                                           "Can't allocate memory "
+                                           "for indirect table\n");
+                       scsi_dma_unmap(cmd);
                        return 0;
                }
        }
 
-       total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);        
+       total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
 
        indirect->len = total_length;
        indirect->table_desc.va = evt_struct->ext_list_token;
        indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
        memcpy(indirect->desc_list, evt_struct->ext_list,
               MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
-       
        return 1;
 }
 
 /**
- * map_single_data: - Maps memory and initializes memory decriptor fields
- * @cmd:       struct scsi_cmnd with the memory to be mapped
- * @srp_cmd:   srp_cmd that contains the memory descriptor
- * @dev:       device for which to map dma memory
- *
- * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
- * Returns 1 on success.
-*/
-static int map_single_data(struct scsi_cmnd *cmd,
-                          struct srp_cmd *srp_cmd, struct device *dev)
-{
-       struct srp_direct_buf *data =
-               (struct srp_direct_buf *) srp_cmd->add_data;
-
-       data->va =
-               dma_map_single(dev, cmd->request_buffer,
-                              cmd->request_bufflen,
-                              DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(data->va)) {
-               sdev_printk(KERN_ERR, cmd->device,
-                           "Unable to map request_buffer for command!\n");
-               return 0;
-       }
-       data->len = cmd->request_bufflen;
-       data->key = 0;
-
-       set_srp_direction(cmd, srp_cmd, 1);
-
-       return 1;
-}
-
-/**
  * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
  * @cmd:       struct scsi_cmnd with the memory to be mapped
  * @srp_cmd:   srp_cmd that contains the memory descriptor
@@ -502,11 +489,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
                return 0;
        }
 
-       if (!cmd->request_buffer)
-               return 1;
-       if (cmd->use_sg)
-               return map_sg_data(cmd, evt_struct, srp_cmd, dev);
-       return map_single_data(cmd, srp_cmd, dev);
+       return map_sg_data(cmd, evt_struct, srp_cmd, dev);
 }
 
 /**
@@ -546,8 +529,8 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
        atomic_set(&hostdata->request_limit, 0);
 
        purge_requests(hostdata, DID_ERROR);
-       if ((ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata)) ||
-           (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0)) ||
+       if ((ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata)) ||
+           (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0)) ||
            (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
                atomic_set(&hostdata->request_limit, -1);
                dev_err(hostdata->dev, "error after reset\n");
@@ -590,7 +573,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                                   unsigned long timeout)
 {
        u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
-       int request_status;
+       int request_status = 0;
        int rc;
 
        /* If we have exhausted our request limit, just fail this request,
@@ -608,6 +591,13 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                if (request_status < -1)
                        goto send_error;
                /* Otherwise, we may have run out of requests. */
+               /* If request limit was 0 when we started the adapter is in the
+                * process of performing a login with the server adapter, or
+                * we may have run out of requests.
+                */
+               else if (request_status == -1 &&
+                        evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
+                       goto send_busy;
                /* Abort and reset calls should make it through.
                 * Nothing except abort and reset should use the last two
                 * slots unless we had two or less to begin with.
@@ -652,10 +642,20 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
        }
 
        if ((rc =
-            ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+            ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
                list_del(&evt_struct->list);
                del_timer(&evt_struct->timer);
 
+               /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
+                * Firmware will send a CRQ with a transport event (0xFF) to
+                * tell this client what has happened to the transport.  This
+                * will be handled in ibmvscsi_handle_crq()
+                */
+               if (rc == H_CLOSED) {
+                       dev_warn(hostdata->dev, "send warning. "
+                                "Receive queue closed, will retry.\n");
+                       goto send_busy;
+               }
                dev_err(hostdata->dev, "send error %d\n", rc);
                atomic_inc(&hostdata->request_limit);
                goto send_error;
@@ -667,7 +667,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
        free_event_struct(&hostdata->pool, evt_struct);
-       atomic_inc(&hostdata->request_limit);
+       if (request_status != -1)
+               atomic_inc(&hostdata->request_limit);
        return SCSI_MLQUEUE_HOST_BUSY;
 
  send_error:
@@ -702,7 +703,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
        }
        
        if (cmnd) {
-               cmnd->result = rsp->status;
+               cmnd->result |= rsp->status;
                if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
                        memcpy(cmnd->sense_buffer,
                               rsp->data,
@@ -712,9 +713,9 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
                               evt_struct->hostdata->dev);
 
                if (rsp->flags & SRP_RSP_FLAG_DOOVER)
-                       cmnd->resid = rsp->data_out_res_cnt;
+                       scsi_set_resid(cmnd, rsp->data_out_res_cnt);
                else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
-                       cmnd->resid = rsp->data_in_res_cnt;
+                       scsi_set_resid(cmnd, rsp->data_in_res_cnt);
        }
 
        if (evt_struct->cmnd_done)
@@ -742,11 +743,11 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
        struct srp_cmd *srp_cmd;
        struct srp_event_struct *evt_struct;
        struct srp_indirect_buf *indirect;
-       struct ibmvscsi_host_data *hostdata =
-               (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
+       struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
        u16 lun = lun_from_dev(cmnd->device);
        u8 out_fmt, in_fmt;
 
+       cmnd->result = (DID_OK << 16);
        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct)
                return SCSI_MLQUEUE_HOST_BUSY;
@@ -755,11 +756,13 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
        srp_cmd = &evt_struct->iu.srp.cmd;
        memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
        srp_cmd->opcode = SRP_CMD;
-       memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
+       memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
        srp_cmd->lun = ((u64) lun) << 48;
 
        if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
-               sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n");
+               if (!firmware_has_feature(FW_FEATURE_CMO))
+                       sdev_printk(KERN_ERR, cmnd->device,
+                                   "couldn't convert cmd to srp_cmd\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
@@ -767,7 +770,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
        init_event_struct(evt_struct,
                          handle_cmd_rsp,
                          VIOSRP_SRP_FORMAT,
-                         cmnd->timeout_per_command/HZ);
+                         cmnd->request->timeout/HZ);
 
        evt_struct->cmnd = cmnd;
        evt_struct->cmnd_done = done;
@@ -790,102 +793,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 /* ------------------------------------------------------------
  * Routines for driver initialization
  */
+
 /**
- * adapter_info_rsp: - Handle response to MAD adapter info request
- * @evt_struct:        srp_event_struct with the response
+ * map_persist_bufs: - Pre-map persistent data for adapter logins
+ * @hostdata:   ibmvscsi_host_data of host
  *
- * Used as a "done" callback by when sending adapter_info. Gets called
- * by ibmvscsi_handle_crq()
-*/
-static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+ * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
+ * Return 1 on error, 0 on success.
+ */
+static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
 {
-       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-       dma_unmap_single(hostdata->dev,
-                        evt_struct->iu.mad.adapter_info.buffer,
-                        evt_struct->iu.mad.adapter_info.common.length,
-                        DMA_BIDIRECTIONAL);
 
-       if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
-               dev_err(hostdata->dev, "error %d getting adapter info\n",
-                       evt_struct->xfer_iu->mad.adapter_info.common.status);
-       } else {
-               dev_info(hostdata->dev, "host srp version: %s, "
-                        "host partition %s (%d), OS %d, max io %u\n",
-                        hostdata->madapter_info.srp_version,
-                        hostdata->madapter_info.partition_name,
-                        hostdata->madapter_info.partition_number,
-                        hostdata->madapter_info.os_type,
-                        hostdata->madapter_info.port_max_txu[0]);
-               
-               if (hostdata->madapter_info.port_max_txu[0]) 
-                       hostdata->host->max_sectors = 
-                               hostdata->madapter_info.port_max_txu[0] >> 9;
-               
-               if (hostdata->madapter_info.os_type == 3 &&
-                   strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
-                       dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
-                               hostdata->madapter_info.srp_version);
-                       dev_err(hostdata->dev, "limiting scatterlists to %d\n",
-                               MAX_INDIRECT_BUFS);
-                       hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
-               }
+       hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
+                                            sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+
+       if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
+               dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
+               return 1;
+       }
+
+       hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
+                                                    &hostdata->madapter_info,
+                                                    sizeof(hostdata->madapter_info),
+                                                    DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
+               dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
+               dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+                                sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+               return 1;
        }
+
+       return 0;
 }
 
 /**
- * send_mad_adapter_info: - Sends the mad adapter info request
- *      and stores the result so it can be retrieved with
- *      sysfs.  We COULD consider causing a failure if the
- *      returned SRP version doesn't match ours.
- * @hostdata:  ibmvscsi_host_data of host
- * 
- * Returns zero if successful.
-*/
-static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+ * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
+ * @hostdata:   ibmvscsi_host_data of host
+ *
+ * Unmap the capabilities and adapter info DMA buffers
+ */
+static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
 {
-       struct viosrp_adapter_info *req;
-       struct srp_event_struct *evt_struct;
-       unsigned long flags;
-       dma_addr_t addr;
-
-       evt_struct = get_event_struct(&hostdata->pool);
-       if (!evt_struct) {
-               dev_err(hostdata->dev,
-                       "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
-               return;
-       }
-
-       init_event_struct(evt_struct,
-                         adapter_info_rsp,
-                         VIOSRP_MAD_FORMAT,
-                         init_timeout * HZ);
-       
-       req = &evt_struct->iu.mad.adapter_info;
-       memset(req, 0x00, sizeof(*req));
-       
-       req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
-       req->common.length = sizeof(hostdata->madapter_info);
-       req->buffer = addr = dma_map_single(hostdata->dev,
-                                           &hostdata->madapter_info,
-                                           sizeof(hostdata->madapter_info),
-                                           DMA_BIDIRECTIONAL);
+       dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+                        sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(req->buffer)) {
-               dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n");
-               free_event_struct(&hostdata->pool, evt_struct);
-               return;
-       }
-       
-       spin_lock_irqsave(hostdata->host->host_lock, flags);
-       if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
-               dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
-               dma_unmap_single(hostdata->dev,
-                                addr,
-                                sizeof(hostdata->madapter_info),
-                                DMA_BIDIRECTIONAL);
-       }
-       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-};
+       dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
+                        sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
+}
 
 /**
  * login_rsp: - Handle response to SRP login request
@@ -915,9 +869,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
        }
 
        dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
-
-       if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
-               dev_err(hostdata->dev, "Invalid request_limit.\n");
+       hostdata->client_migrated = 0;
 
        /* Now we know what the real request-limit is.
         * This value is set rather than added to request_limit because
@@ -928,15 +880,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 
        /* If we had any pending I/Os, kick them */
        scsi_unblock_requests(hostdata->host);
-
-       send_mad_adapter_info(hostdata);
-       return;
 }
 
 /**
  * send_srp_login: - Sends the srp login
  * @hostdata:  ibmvscsi_host_data of host
- * 
+ *
  * Returns zero if successful.
 */
 static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@@ -945,35 +894,264 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
        unsigned long flags;
        struct srp_login_req *login;
        struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
-       if (!evt_struct) {
-               dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
-               return FAILED;
-       }
 
-       init_event_struct(evt_struct,
-                         login_rsp,
-                         VIOSRP_SRP_FORMAT,
-                         init_timeout * HZ);
+       BUG_ON(!evt_struct);
+       init_event_struct(evt_struct, login_rsp,
+                         VIOSRP_SRP_FORMAT, login_timeout);
 
        login = &evt_struct->iu.srp.login_req;
-       memset(login, 0x00, sizeof(struct srp_login_req));
+       memset(login, 0, sizeof(*login));
        login->opcode = SRP_LOGIN_REQ;
        login->req_it_iu_len = sizeof(union srp_iu);
        login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
-       
+
        spin_lock_irqsave(hostdata->host->host_lock, flags);
-       /* Start out with a request limit of 1, since this is negotiated in
-        * the login request we are just sending
+       /* Start out with a request limit of 0, since this is negotiated in
+        * the login request we are just sending and login requests always
+        * get sent by the driver regardless of request_limit.
         */
-       atomic_set(&hostdata->request_limit, 1);
+       atomic_set(&hostdata->request_limit, 0);
 
-       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        dev_info(hostdata->dev, "sent SRP login\n");
        return rc;
 };
 
 /**
+ * capabilities_rsp: - Handle response to MAD adapter capabilities request
+ * @evt_struct:        srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending the capabilities request.
+ */
+static void capabilities_rsp(struct srp_event_struct *evt_struct)
+{
+       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+       if (evt_struct->xfer_iu->mad.capabilities.common.status) {
+               dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
+                       evt_struct->xfer_iu->mad.capabilities.common.status);
+       } else {
+               if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+                       dev_info(hostdata->dev, "Partition migration not supported\n");
+
+               if (client_reserve) {
+                       if (hostdata->caps.reserve.common.server_support ==
+                           SERVER_SUPPORTS_CAP)
+                               dev_info(hostdata->dev, "Client reserve enabled\n");
+                       else
+                               dev_info(hostdata->dev, "Client reserve not supported\n");
+               }
+       }
+
+       send_srp_login(hostdata);
+}
+
+/**
+ * send_mad_capabilities: - Sends the mad capabilities request
+ *      and stores the result so it can be retrieved with sysfs.
+ * @hostdata:  ibmvscsi_host_data of host
+ */
+static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
+{
+       struct viosrp_capabilities *req;
+       struct srp_event_struct *evt_struct;
+       unsigned long flags;
+       struct device_node *of_node = hostdata->dev->of_node;
+       const char *location;
+
+       evt_struct = get_event_struct(&hostdata->pool);
+       BUG_ON(!evt_struct);
+
+       init_event_struct(evt_struct, capabilities_rsp,
+                         VIOSRP_MAD_FORMAT, info_timeout);
+
+       req = &evt_struct->iu.mad.capabilities;
+       memset(req, 0, sizeof(*req));
+
+       hostdata->caps.flags = CAP_LIST_SUPPORTED;
+       if (hostdata->client_migrated)
+               hostdata->caps.flags |= CLIENT_MIGRATED;
+
+       strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
+               sizeof(hostdata->caps.name));
+       hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
+
+       location = of_get_property(of_node, "ibm,loc-code", NULL);
+       location = location ? location : dev_name(hostdata->dev);
+       strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
+       hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
+
+       req->common.type = VIOSRP_CAPABILITIES_TYPE;
+       req->buffer = hostdata->caps_addr;
+
+       hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
+       hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
+       hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
+       hostdata->caps.migration.ecl = 1;
+
+       if (client_reserve) {
+               hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
+               hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
+               hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
+               hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
+               req->common.length = sizeof(hostdata->caps);
+       } else
+               req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+               dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
+ * fast_fail_rsp: - Handle response to MAD enable fast fail
+ * @evt_struct:        srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending enable fast fail. Gets called
+ * by ibmvscsi_handle_crq()
+ */
+static void fast_fail_rsp(struct srp_event_struct *evt_struct)
+{
+       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+       u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+
+       if (status == VIOSRP_MAD_NOT_SUPPORTED)
+               dev_err(hostdata->dev, "fast_fail not supported in server\n");
+       else if (status == VIOSRP_MAD_FAILED)
+               dev_err(hostdata->dev, "fast_fail request failed\n");
+       else if (status != VIOSRP_MAD_SUCCESS)
+               dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
+
+       send_mad_capabilities(hostdata);
+}
+
+/**
+ * enable_fast_fail - Send the MAD to enable fast fail, if configured
+ * @hostdata:  ibmvscsi_host_data of host
+ *
+ * Returns zero if successful.
+ */
+static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
+{
+       int rc;
+       unsigned long flags;
+       struct viosrp_fast_fail *fast_fail_mad;
+       struct srp_event_struct *evt_struct;
+
+       if (!fast_fail) {
+               send_mad_capabilities(hostdata);
+               return 0;
+       }
+
+       evt_struct = get_event_struct(&hostdata->pool);
+       BUG_ON(!evt_struct);
+
+       init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
+
+       fast_fail_mad = &evt_struct->iu.mad.fast_fail;
+       memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
+       fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
+       fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+       return rc;
+}
+
+/**
+ * adapter_info_rsp: - Handle response to MAD adapter info request
+ * @evt_struct:        srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending adapter_info. Gets called
+ * by ibmvscsi_handle_crq()
+*/
+static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+{
+       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+       if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
+               dev_err(hostdata->dev, "error %d getting adapter info\n",
+                       evt_struct->xfer_iu->mad.adapter_info.common.status);
+       } else {
+               dev_info(hostdata->dev, "host srp version: %s, "
+                        "host partition %s (%d), OS %d, max io %u\n",
+                        hostdata->madapter_info.srp_version,
+                        hostdata->madapter_info.partition_name,
+                        hostdata->madapter_info.partition_number,
+                        hostdata->madapter_info.os_type,
+                        hostdata->madapter_info.port_max_txu[0]);
+               
+               if (hostdata->madapter_info.port_max_txu[0]) 
+                       hostdata->host->max_sectors = 
+                               hostdata->madapter_info.port_max_txu[0] >> 9;
+               
+               if (hostdata->madapter_info.os_type == 3 &&
+                   strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
+                       dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
+                               hostdata->madapter_info.srp_version);
+                       dev_err(hostdata->dev, "limiting scatterlists to %d\n",
+                               MAX_INDIRECT_BUFS);
+                       hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
+               }
+
+               if (hostdata->madapter_info.os_type == 3) {
+                       enable_fast_fail(hostdata);
+                       return;
+               }
+       }
+
+       send_srp_login(hostdata);
+}
+
+/**
+ * send_mad_adapter_info: - Sends the mad adapter info request
+ *      and stores the result so it can be retrieved with
+ *      sysfs.  We COULD consider causing a failure if the
+ *      returned SRP version doesn't match ours.
+ * @hostdata:  ibmvscsi_host_data of host
+ * 
+ * Returns zero if successful.
+*/
+static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+       struct viosrp_adapter_info *req;
+       struct srp_event_struct *evt_struct;
+       unsigned long flags;
+
+       evt_struct = get_event_struct(&hostdata->pool);
+       BUG_ON(!evt_struct);
+
+       init_event_struct(evt_struct,
+                         adapter_info_rsp,
+                         VIOSRP_MAD_FORMAT,
+                         info_timeout);
+       
+       req = &evt_struct->iu.mad.adapter_info;
+       memset(req, 0x00, sizeof(*req));
+       
+       req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
+       req->common.length = sizeof(hostdata->madapter_info);
+       req->buffer = hostdata->adapter_info_addr;
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+               dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
+ * init_adapter: Start virtual adapter initialization sequence
+ *
+ */
+static void init_adapter(struct ibmvscsi_host_data *hostdata)
+{
+       send_mad_adapter_info(hostdata);
+}
+
+/**
  * sync_completion: Signal that a synchronous command has completed
  * Note that after returning from this call, the evt_struct is freed.
  * the caller waiting on this completion shouldn't touch the evt_struct
@@ -994,8 +1172,7 @@ static void sync_completion(struct srp_event_struct *evt_struct)
  */
 static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 {
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
+       struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
        struct srp_tsk_mgmt *tsk_mgmt;
        struct srp_event_struct *evt;
        struct srp_event_struct *tmp_evt, *found_evt;
@@ -1003,58 +1180,74 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
        int rsp_rc;
        unsigned long flags;
        u16 lun = lun_from_dev(cmd->device);
+       unsigned long wait_switch = 0;
 
        /* First, find this command in our sent list so we can figure
         * out the correct tag
         */
        spin_lock_irqsave(hostdata->host->host_lock, flags);
-       found_evt = NULL;
-       list_for_each_entry(tmp_evt, &hostdata->sent, list) {
-               if (tmp_evt->cmnd == cmd) {
-                       found_evt = tmp_evt;
-                       break;
+       wait_switch = jiffies + (init_timeout * HZ);
+       do {
+               found_evt = NULL;
+               list_for_each_entry(tmp_evt, &hostdata->sent, list) {
+                       if (tmp_evt->cmnd == cmd) {
+                               found_evt = tmp_evt;
+                               break;
+                       }
                }
-       }
 
-       if (!found_evt) {
-               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-               return SUCCESS;
-       }
+               if (!found_evt) {
+                       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+                       return SUCCESS;
+               }
 
-       evt = get_event_struct(&hostdata->pool);
-       if (evt == NULL) {
-               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-               sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
-               return FAILED;
-       }
+               evt = get_event_struct(&hostdata->pool);
+               if (evt == NULL) {
+                       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+                       sdev_printk(KERN_ERR, cmd->device,
+                               "failed to allocate abort event\n");
+                       return FAILED;
+               }
        
-       init_event_struct(evt,
-                         sync_completion,
-                         VIOSRP_SRP_FORMAT,
-                         init_timeout * HZ);
+               init_event_struct(evt,
+                                 sync_completion,
+                                 VIOSRP_SRP_FORMAT,
+                                 abort_timeout);
 
-       tsk_mgmt = &evt->iu.srp.tsk_mgmt;
+               tsk_mgmt = &evt->iu.srp.tsk_mgmt;
        
-       /* Set up an abort SRP command */
-       memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-       tsk_mgmt->opcode = SRP_TSK_MGMT;
-       tsk_mgmt->lun = ((u64) lun) << 48;
-       tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
-       tsk_mgmt->task_tag = (u64) found_evt;
-
-       sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
-                   tsk_mgmt->lun, tsk_mgmt->task_tag);
-
-       evt->sync_srp = &srp_rsp;
-       init_completion(&evt->comp);
-       rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+               /* Set up an abort SRP command */
+               memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
+               tsk_mgmt->opcode = SRP_TSK_MGMT;
+               tsk_mgmt->lun = ((u64) lun) << 48;
+               tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
+               tsk_mgmt->task_tag = (u64) found_evt;
+
+               evt->sync_srp = &srp_rsp;
+
+               init_completion(&evt->comp);
+               rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
+
+               if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
+                       break;
+
+               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+               msleep(10);
+               spin_lock_irqsave(hostdata->host->host_lock, flags);
+       } while (time_before(jiffies, wait_switch));
+
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
        if (rsp_rc != 0) {
                sdev_printk(KERN_ERR, cmd->device,
                            "failed to send abort() event. rc=%d\n", rsp_rc);
                return FAILED;
        }
 
+       sdev_printk(KERN_INFO, cmd->device,
+                    "aborting command. lun 0x%llx, tag 0x%llx\n",
+                   (((u64) lun) << 48), (u64) found_evt);
+
        wait_for_completion(&evt->comp);
 
        /* make sure we got a good response */
@@ -1073,7 +1266,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
        if (rsp_rc) {
                if (printk_ratelimit())
                        sdev_printk(KERN_WARNING, cmd->device,
-                                   "abort code %d for task tag 0x%lx\n",
+                                   "abort code %d for task tag 0x%llx\n",
                                    rsp_rc, tsk_mgmt->task_tag);
                return FAILED;
        }
@@ -1093,12 +1286,12 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
        if (found_evt == NULL) {
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-               sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n",
+               sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
                            tsk_mgmt->task_tag);
                return SUCCESS;
        }
 
-       sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n",
+       sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
                    tsk_mgmt->task_tag);
 
        cmd->result = (DID_ABORT << 16);
@@ -1118,9 +1311,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
  */
 static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
-
+       struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
        struct srp_tsk_mgmt *tsk_mgmt;
        struct srp_event_struct *evt;
        struct srp_event_struct *tmp_evt, *pos;
@@ -1128,41 +1319,56 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
        int rsp_rc;
        unsigned long flags;
        u16 lun = lun_from_dev(cmd->device);
+       unsigned long wait_switch = 0;
 
        spin_lock_irqsave(hostdata->host->host_lock, flags);
-       evt = get_event_struct(&hostdata->pool);
-       if (evt == NULL) {
-               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-               sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
-               return FAILED;
-       }
+       wait_switch = jiffies + (init_timeout * HZ);
+       do {
+               evt = get_event_struct(&hostdata->pool);
+               if (evt == NULL) {
+                       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+                       sdev_printk(KERN_ERR, cmd->device,
+                               "failed to allocate reset event\n");
+                       return FAILED;
+               }
        
-       init_event_struct(evt,
-                         sync_completion,
-                         VIOSRP_SRP_FORMAT,
-                         init_timeout * HZ);
+               init_event_struct(evt,
+                                 sync_completion,
+                                 VIOSRP_SRP_FORMAT,
+                                 reset_timeout);
 
-       tsk_mgmt = &evt->iu.srp.tsk_mgmt;
+               tsk_mgmt = &evt->iu.srp.tsk_mgmt;
 
-       /* Set up a lun reset SRP command */
-       memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-       tsk_mgmt->opcode = SRP_TSK_MGMT;
-       tsk_mgmt->lun = ((u64) lun) << 48;
-       tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
+               /* Set up a lun reset SRP command */
+               memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
+               tsk_mgmt->opcode = SRP_TSK_MGMT;
+               tsk_mgmt->lun = ((u64) lun) << 48;
+               tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
+
+               evt->sync_srp = &srp_rsp;
+
+               init_completion(&evt->comp);
+               rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
 
-       sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
-                   tsk_mgmt->lun);
+               if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
+                       break;
+
+               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+               msleep(10);
+               spin_lock_irqsave(hostdata->host->host_lock, flags);
+       } while (time_before(jiffies, wait_switch));
 
-       evt->sync_srp = &srp_rsp;
-       init_completion(&evt->comp);
-       rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
        if (rsp_rc != 0) {
                sdev_printk(KERN_ERR, cmd->device,
                            "failed to send reset event. rc=%d\n", rsp_rc);
                return FAILED;
        }
 
+       sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
+                   (((u64) lun) << 48));
+
        wait_for_completion(&evt->comp);
 
        /* make sure we got a good response */
@@ -1181,7 +1387,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
        if (rsp_rc) {
                if (printk_ratelimit())
                        sdev_printk(KERN_WARNING, cmd->device,
-                                   "reset code %d for task tag 0x%lx\n",
+                                   "reset code %d for task tag 0x%llx\n",
                                    rsp_rc, tsk_mgmt->task_tag);
                return FAILED;
        }
@@ -1217,8 +1423,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
        unsigned long wait_switch = 0;
-       struct ibmvscsi_host_data *hostdata =
-               (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
+       struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
 
        dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
 
@@ -1256,10 +1461,10 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                case 0x01:      /* Initialization message */
                        dev_info(hostdata->dev, "partner initialized\n");
                        /* Send back a response */
-                       if ((rc = ibmvscsi_send_crq(hostdata,
-                                                   0xC002000000000000LL, 0)) == 0) {
+                       if ((rc = ibmvscsi_ops->send_crq(hostdata,
+                                                        0xC002000000000000LL, 0)) == 0) {
                                /* Now login */
-                               send_srp_login(hostdata);
+                               init_adapter(hostdata);
                        } else {
                                dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
                        }
@@ -1269,7 +1474,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                        dev_info(hostdata->dev, "partner initialization complete\n");
 
                        /* Now login */
-                       send_srp_login(hostdata);
+                       init_adapter(hostdata);
                        break;
                default:
                        dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
@@ -1281,11 +1486,12 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                if (crq->format == 0x06) {
                        /* We need to re-setup the interpartition connection */
                        dev_info(hostdata->dev, "Re-enabling adapter!\n");
+                       hostdata->client_migrated = 1;
                        purge_requests(hostdata, DID_REQUEUE);
-                       if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
-                                                       hostdata)) ||
-                           (ibmvscsi_send_crq(hostdata,
-                                              0xC001000000000000LL, 0))) {
+                       if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
+                                                             hostdata)) ||
+                           (ibmvscsi_ops->send_crq(hostdata,
+                                                   0xC001000000000000LL, 0))) {
                                        atomic_set(&hostdata->request_limit,
                                                   -1);
                                        dev_err(hostdata->dev, "error after enable\n");
@@ -1295,10 +1501,10 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                                crq->format);
 
                        purge_requests(hostdata, DID_ERROR);
-                       if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
-                                                       hostdata)) ||
-                           (ibmvscsi_send_crq(hostdata,
-                                              0xC001000000000000LL, 0))) {
+                       if ((ibmvscsi_ops->reset_crq_queue(&hostdata->queue,
+                                                          hostdata)) ||
+                           (ibmvscsi_ops->send_crq(hostdata,
+                                                   0xC001000000000000LL, 0))) {
                                        atomic_set(&hostdata->request_limit,
                                                   -1);
                                        dev_err(hostdata->dev, "error after reset\n");
@@ -1336,6 +1542,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 
        del_timer(&evt_struct->timer);
 
+       if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
+               evt_struct->cmnd->result = DID_ERROR << 16;
        if (evt_struct->done)
                evt_struct->done(evt_struct);
        else
@@ -1373,7 +1581,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
        init_event_struct(evt_struct,
                          sync_completion,
                          VIOSRP_MAD_FORMAT,
-                         init_timeout * HZ);
+                         info_timeout);
 
        host_config = &evt_struct->iu.mad.host_config;
 
@@ -1385,15 +1593,17 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
                                                    length,
                                                    DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(host_config->buffer)) {
-               dev_err(hostdata->dev, "dma_mapping error getting host config\n");
+       if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
+               if (!firmware_has_feature(FW_FEATURE_CMO))
+                       dev_err(hostdata->dev,
+                               "dma_mapping error getting host config\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return -1;
        }
 
        init_completion(&evt_struct->comp);
        spin_lock_irqsave(hostdata->host->host_lock, flags);
-       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        if (rc == 0)
                wait_for_completion(&evt_struct->comp);
@@ -1416,8 +1626,10 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
        unsigned long lock_flags = 0;
 
        spin_lock_irqsave(shost->host_lock, lock_flags);
-       if (sdev->type == TYPE_DISK)
+       if (sdev->type == TYPE_DISK) {
                sdev->allow_restart = 1;
+               blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+       }
        scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
        spin_unlock_irqrestore(shost->host_lock, lock_flags);
        return 0;
@@ -1427,12 +1639,17 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
  * ibmvscsi_change_queue_depth - Change the device's queue depth
  * @sdev:      scsi device struct
  * @qdepth:    depth to set
+ * @reason:    calling context
  *
  * Return value:
  *     actual depth set
  **/
-static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth,
+                                      int reason)
 {
+       if (reason != SCSI_QDEPTH_DEFAULT)
+               return -EOPNOTSUPP;
+
        if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
                qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
 
@@ -1443,11 +1660,51 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
 /* ------------------------------------------------------------
  * sysfs attributes
  */
-static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
+static ssize_t show_host_vhost_loc(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+       int len;
+
+       len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
+                      hostdata->caps.loc);
+       return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_loc = {
+       .attr = {
+                .name = "vhost_loc",
+                .mode = S_IRUGO,
+                },
+       .show = show_host_vhost_loc,
+};
+
+static ssize_t show_host_vhost_name(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+       int len;
+
+       len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
+                      hostdata->caps.name);
+       return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_name = {
+       .attr = {
+                .name = "vhost_name",
+                .mode = S_IRUGO,
+                },
+       .show = show_host_vhost_name,
+};
+
+static ssize_t show_host_srp_version(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
        int len;
 
        len = snprintf(buf, PAGE_SIZE, "%s\n",
@@ -1455,7 +1712,7 @@ static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
        return len;
 }
 
-static struct class_device_attribute ibmvscsi_host_srp_version = {
+static struct device_attribute ibmvscsi_host_srp_version = {
        .attr = {
                 .name = "srp_version",
                 .mode = S_IRUGO,
@@ -1463,12 +1720,12 @@ static struct class_device_attribute ibmvscsi_host_srp_version = {
        .show = show_host_srp_version,
 };
 
-static ssize_t show_host_partition_name(struct class_device *class_dev,
+static ssize_t show_host_partition_name(struct device *dev,
+                                       struct device_attribute *attr,
                                        char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
        int len;
 
        len = snprintf(buf, PAGE_SIZE, "%s\n",
@@ -1476,7 +1733,7 @@ static ssize_t show_host_partition_name(struct class_device *class_dev,
        return len;
 }
 
-static struct class_device_attribute ibmvscsi_host_partition_name = {
+static struct device_attribute ibmvscsi_host_partition_name = {
        .attr = {
                 .name = "partition_name",
                 .mode = S_IRUGO,
@@ -1484,12 +1741,12 @@ static struct class_device_attribute ibmvscsi_host_partition_name = {
        .show = show_host_partition_name,
 };
 
-static ssize_t show_host_partition_number(struct class_device *class_dev,
+static ssize_t show_host_partition_number(struct device *dev,
+                                         struct device_attribute *attr,
                                          char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
        int len;
 
        len = snprintf(buf, PAGE_SIZE, "%d\n",
@@ -1497,7 +1754,7 @@ static ssize_t show_host_partition_number(struct class_device *class_dev,
        return len;
 }
 
-static struct class_device_attribute ibmvscsi_host_partition_number = {
+static struct device_attribute ibmvscsi_host_partition_number = {
        .attr = {
                 .name = "partition_number",
                 .mode = S_IRUGO,
@@ -1505,11 +1762,11 @@ static struct class_device_attribute ibmvscsi_host_partition_number = {
        .show = show_host_partition_number,
 };
 
-static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
+static ssize_t show_host_mad_version(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
        int len;
 
        len = snprintf(buf, PAGE_SIZE, "%d\n",
@@ -1517,7 +1774,7 @@ static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
        return len;
 }
 
-static struct class_device_attribute ibmvscsi_host_mad_version = {
+static struct device_attribute ibmvscsi_host_mad_version = {
        .attr = {
                 .name = "mad_version",
                 .mode = S_IRUGO,
@@ -1525,18 +1782,18 @@ static struct class_device_attribute ibmvscsi_host_mad_version = {
        .show = show_host_mad_version,
 };
 
-static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
+static ssize_t show_host_os_type(struct device *dev,
+                                struct device_attribute *attr, char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
        int len;
 
        len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
        return len;
 }
 
-static struct class_device_attribute ibmvscsi_host_os_type = {
+static struct device_attribute ibmvscsi_host_os_type = {
        .attr = {
                 .name = "os_type",
                 .mode = S_IRUGO,
@@ -1544,11 +1801,11 @@ static struct class_device_attribute ibmvscsi_host_os_type = {
        .show = show_host_os_type,
 };
 
-static ssize_t show_host_config(struct class_device *class_dev, char *buf)
+static ssize_t show_host_config(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
 
        /* returns null-terminated host config data */
        if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
@@ -1557,7 +1814,7 @@ static ssize_t show_host_config(struct class_device *class_dev, char *buf)
                return 0;
 }
 
-static struct class_device_attribute ibmvscsi_host_config = {
+static struct device_attribute ibmvscsi_host_config = {
        .attr = {
                 .name = "config",
                 .mode = S_IRUGO,
@@ -1565,7 +1822,9 @@ static struct class_device_attribute ibmvscsi_host_config = {
        .show = show_host_config,
 };
 
-static struct class_device_attribute *ibmvscsi_attrs[] = {
+static struct device_attribute *ibmvscsi_attrs[] = {
+       &ibmvscsi_host_vhost_loc,
+       &ibmvscsi_host_vhost_name,
        &ibmvscsi_host_srp_version,
        &ibmvscsi_host_partition_name,
        &ibmvscsi_host_partition_number,
@@ -1588,7 +1847,7 @@ static struct scsi_host_template driver_template = {
        .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
        .slave_configure = ibmvscsi_slave_configure,
        .change_queue_depth = ibmvscsi_change_queue_depth,
-       .cmd_per_lun = 16,
+       .cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
        .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
        .this_id = -1,
        .sg_tablesize = SG_ALL,
@@ -1597,6 +1856,26 @@ static struct scsi_host_template driver_template = {
 };
 
 /**
+ * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ *     Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
+{
+       /* iu_storage data allocated in initialize_event_pool */
+       unsigned long desired_io = max_events * sizeof(union viosrp_iu);
+
+       /* add io space for sg data */
+       desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
+                            IBMVSCSI_CMDS_PER_LUN_DEFAULT);
+
+       return desired_io;
+}
+
+/**
  * Called by bus code for each adapter
  */
 static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -1604,32 +1883,39 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        struct ibmvscsi_host_data *hostdata;
        struct Scsi_Host *host;
        struct device *dev = &vdev->dev;
+       struct srp_rport_identifiers ids;
+       struct srp_rport *rport;
        unsigned long wait_switch = 0;
        int rc;
 
-       vdev->dev.driver_data = NULL;
+       dev_set_drvdata(&vdev->dev, NULL);
 
-       driver_template.can_queue = max_requests;
        host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
        if (!host) {
                dev_err(&vdev->dev, "couldn't allocate host data\n");
                goto scsi_host_alloc_failed;
        }
 
-       hostdata = (struct ibmvscsi_host_data *)host->hostdata;
+       host->transportt = ibmvscsi_transport_template;
+       hostdata = shost_priv(host);
        memset(hostdata, 0x00, sizeof(*hostdata));
        INIT_LIST_HEAD(&hostdata->sent);
        hostdata->host = host;
        hostdata->dev = dev;
        atomic_set(&hostdata->request_limit, -1);
-       hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
+       hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
 
-       rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
+       if (map_persist_bufs(hostdata)) {
+               dev_err(&vdev->dev, "couldn't map persistent buffers\n");
+               goto persist_bufs_failed;
+       }
+
+       rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
        if (rc != 0 && rc != H_RESOURCE) {
                dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
                goto init_crq_failed;
        }
-       if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
+       if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
                dev_err(&vdev->dev, "couldn't initialize event pool\n");
                goto init_pool_failed;
        }
@@ -1637,15 +1923,24 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        host->max_lun = 8;
        host->max_id = max_id;
        host->max_channel = max_channel;
+       host->max_cmd_len = 16;
 
        if (scsi_add_host(hostdata->host, hostdata->dev))
                goto add_host_failed;
 
+       /* we don't have a proper target_port_id so let's use the fake one */
+       memcpy(ids.port_id, hostdata->madapter_info.partition_name,
+              sizeof(ids.port_id));
+       ids.roles = SRP_RPORT_ROLE_TARGET;
+       rport = srp_rport_add(host, &ids);
+       if (IS_ERR(rport))
+               goto add_srp_port_failed;
+
        /* Try to send an initialization message.  Note that this is allowed
         * to fail if the other end is not acive.  In that case we don't
         * want to scan
         */
-       if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+       if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
            || rc == H_RESOURCE) {
                /*
                 * Wait around max init_timeout secs for the adapter to finish
@@ -1665,14 +1960,18 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                        scsi_scan_host(host);
        }
 
-       vdev->dev.driver_data = hostdata;
+       dev_set_drvdata(&vdev->dev, hostdata);
        return 0;
 
+      add_srp_port_failed:
+       scsi_remove_host(hostdata->host);
       add_host_failed:
        release_event_pool(&hostdata->pool, hostdata);
       init_pool_failed:
-       ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
+       ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
       init_crq_failed:
+       unmap_persist_bufs(hostdata);
+      persist_bufs_failed:
        scsi_host_put(host);
       scsi_host_alloc_failed:
        return -1;
@@ -1680,11 +1979,13 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
-       struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
+       struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
+       unmap_persist_bufs(hostdata);
        release_event_pool(&hostdata->pool, hostdata);
-       ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
-                                  max_requests);
-       
+       ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
+                                       max_events);
+
+       srp_remove_host(hostdata->host);
        scsi_remove_host(hostdata->host);
        scsi_host_put(hostdata->host);
 
@@ -1692,6 +1993,19 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
 }
 
 /**
+ * ibmvscsi_resume - Resume from suspend
+ * @dev:       device struct
+ *
+ * We may have lost an interrupt across suspend/resume, so kick the
+ * interrupt handler.
+ */
+static int ibmvscsi_resume(struct device *dev)
+{
+       struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
+       return ibmvscsi_ops->resume(hostdata);
+}
+
+/**
  * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 
  * support.
  */
@@ -1701,24 +2015,55 @@ static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
 };
 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
 
+static struct dev_pm_ops ibmvscsi_pm_ops = {
+       .resume = ibmvscsi_resume
+};
+
 static struct vio_driver ibmvscsi_driver = {
        .id_table = ibmvscsi_device_table,
        .probe = ibmvscsi_probe,
        .remove = ibmvscsi_remove,
+       .get_desired_dma = ibmvscsi_get_desired_dma,
        .driver = {
                .name = "ibmvscsi",
                .owner = THIS_MODULE,
+               .pm = &ibmvscsi_pm_ops,
        }
 };
 
+static struct srp_function_template ibmvscsi_transport_functions = {
+};
+
 int __init ibmvscsi_module_init(void)
 {
-       return vio_register_driver(&ibmvscsi_driver);
+       int ret;
+
+       /* Ensure we have two requests to do error recovery */
+       driver_template.can_queue = max_requests;
+       max_events = max_requests + 2;
+
+       if (firmware_has_feature(FW_FEATURE_ISERIES))
+               ibmvscsi_ops = &iseriesvscsi_ops;
+       else if (firmware_has_feature(FW_FEATURE_VIO))
+               ibmvscsi_ops = &rpavscsi_ops;
+       else
+               return -ENODEV;
+
+       ibmvscsi_transport_template =
+               srp_attach_transport(&ibmvscsi_transport_functions);
+       if (!ibmvscsi_transport_template)
+               return -ENOMEM;
+
+       ret = vio_register_driver(&ibmvscsi_driver);
+       if (ret)
+               srp_release_transport(ibmvscsi_transport_template);
+       return ret;
 }
 
 void __exit ibmvscsi_module_exit(void)
 {
        vio_unregister_driver(&ibmvscsi_driver);
+       srp_release_transport(ibmvscsi_transport_template);
 }
 
 module_init(ibmvscsi_module_init);