of: Always use 'struct device.of_node' to get device node pointer.
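The subject line refers to reading the device-tree node through the device's own of_node pointer, as done in the send_mad_capabilities() hunk below. A minimal sketch of that access pattern, assuming a generic struct device with a devicetree node attached; the function name and fallback are illustrative only, not part of this patch:

/*
 * Illustrative sketch (not from this patch): read a devicetree property
 * via dev->of_node, the accessor this change standardizes on.  The
 * "ibm,loc-code" property matches the one the driver queries below.
 */
#include <linux/device.h>
#include <linux/of.h>

static void example_read_loc_code(struct device *dev)
{
	struct device_node *dn = dev->of_node;	/* preferred over per-arch lookups */
	const char *location;

	location = of_get_property(dn, "ibm,loc-code", NULL);
	if (!location)
		location = dev_name(dev);	/* fall back to the generic device name */
	dev_info(dev, "location code: %s\n", location);
}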
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index b580af9..cc38fef 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -40,7 +40,7 @@
  * (CRQ), which is just a buffer of 16 byte entries in the receiver's 
  * Senders cannot access the buffer directly, but send messages by
  * making a hypervisor call and passing in the 16 bytes.  The hypervisor
- * puts the message in the next 16 byte space in round-robbin fashion,
+ * puts the message in the next 16 byte space in round-robin fashion,
  * turns on the high order bit of the message (the valid bit), and 
  * generates an interrupt to the receiver (if interrupts are turned on.) 
  * The receiver just turns off the valid bit when they have copied out
 #include <linux/moduleparam.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <asm/firmware.h>
 #include <asm/vio.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_srp.h>
 #include "ibmvscsi.h"
 
 /* The values below are somewhat arbitrary default values, but 
  */
 static int max_id = 64;
 static int max_channel = 3;
-static int init_timeout = 5;
+static int init_timeout = 300;
+static int login_timeout = 60;
+static int info_timeout = 30;
+static int abort_timeout = 60;
+static int reset_timeout = 60;
 static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
+static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
+static int fast_fail = 1;
+static int client_reserve = 1;
+
+static struct scsi_transport_template *ibmvscsi_transport_template;
 
 #define IBMVSCSI_VERSION "1.5.8"
 
+static struct ibmvscsi_ops *ibmvscsi_ops;
+
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
 MODULE_LICENSE("GPL");
@@ -100,8 +116,12 @@ module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_channel, "Largest channel value");
 module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
-module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
+module_param_named(max_requests, max_requests, int, S_IRUGO);
 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
+module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
+module_param_named(client_reserve, client_reserve, int, S_IRUGO);
+MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
 
 /* ------------------------------------------------------------
  * Routines for the event pool and event structs
@@ -173,9 +193,8 @@ static void release_event_pool(struct event_pool *pool,
                }
        }
        if (in_use)
-               printk(KERN_WARNING
-                      "ibmvscsi: releasing event pool with %d "
-                      "events still in use?\n", in_use);
+               dev_warn(hostdata->dev, "releasing event pool with %d "
+                        "events still in use?\n", in_use);
        kfree(pool->events);
        dma_free_coherent(hostdata->dev,
                          pool->size * sizeof(*pool->iu_storage),
@@ -210,15 +229,13 @@ static void free_event_struct(struct event_pool *pool,
                                       struct srp_event_struct *evt)
 {
        if (!valid_event_struct(pool, evt)) {
-               printk(KERN_ERR
-                      "ibmvscsi: Freeing invalid event_struct %p "
-                      "(not in pool %p)\n", evt, pool->events);
+               dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
+                       "(not in pool %p)\n", evt, pool->events);
                return;
        }
        if (atomic_inc_return(&evt->free) != 1) {
-               printk(KERN_ERR
-                      "ibmvscsi: Freeing event_struct %p "
-                      "which is not in use!\n", evt);
+               dev_err(evt->hostdata->dev, "Freeing event_struct %p "
+                       "which is not in use!\n", evt);
                return;
        }
 }
@@ -353,20 +370,19 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
        }
 }
 
-static int map_sg_list(int num_entries, 
-                      struct scatterlist *sg,
+static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
                       struct srp_direct_buf *md)
 {
        int i;
+       struct scatterlist *sg;
        u64 total_length = 0;
 
-       for (i = 0; i < num_entries; ++i) {
+       scsi_for_each_sg(cmd, sg, nseg, i) {
                struct srp_direct_buf *descr = md + i;
-               struct scatterlist *sg_entry = &sg[i];
-               descr->va = sg_dma_address(sg_entry);
-               descr->len = sg_dma_len(sg_entry);
+               descr->va = sg_dma_address(sg);
+               descr->len = sg_dma_len(sg);
                descr->key = 0;
-               total_length += sg_dma_len(sg_entry);
+               total_length += sg_dma_len(sg);
        }
        return total_length;
 }
@@ -387,40 +403,31 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
        int sg_mapped;
        u64 total_length = 0;
-       struct scatterlist *sg = cmd->request_buffer;
        struct srp_direct_buf *data =
                (struct srp_direct_buf *) srp_cmd->add_data;
        struct srp_indirect_buf *indirect =
                (struct srp_indirect_buf *) data;
 
-       sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
-
-       if (sg_mapped == 0)
+       sg_mapped = scsi_dma_map(cmd);
+       if (!sg_mapped)
+               return 1;
+       else if (sg_mapped < 0)
                return 0;
 
        set_srp_direction(cmd, srp_cmd, sg_mapped);
 
        /* special case; we can use a single direct descriptor */
        if (sg_mapped == 1) {
-               data->va = sg_dma_address(&sg[0]);
-               data->len = sg_dma_len(&sg[0]);
-               data->key = 0;
+               map_sg_list(cmd, sg_mapped, data);
                return 1;
        }
 
-       if (sg_mapped > SG_ALL) {
-               printk(KERN_ERR
-                      "ibmvscsi: More than %d mapped sg entries, got %d\n",
-                      SG_ALL, sg_mapped);
-               return 0;
-       }
-
        indirect->table_desc.va = 0;
        indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
        indirect->table_desc.key = 0;
 
        if (sg_mapped <= MAX_INDIRECT_BUFS) {
-               total_length = map_sg_list(sg_mapped, sg,
+               total_length = map_sg_list(cmd, sg_mapped,
                                           &indirect->desc_list[0]);
                indirect->len = total_length;
                return 1;
@@ -429,61 +436,30 @@ static int map_sg_data(struct scsi_cmnd *cmd,
        /* get indirect table */
        if (!evt_struct->ext_list) {
                evt_struct->ext_list = (struct srp_direct_buf *)
-                       dma_alloc_coherent(dev, 
+                       dma_alloc_coherent(dev,
                                           SG_ALL * sizeof(struct srp_direct_buf),
                                           &evt_struct->ext_list_token, 0);
                if (!evt_struct->ext_list) {
-                       printk(KERN_ERR
-                              "ibmvscsi: Can't allocate memory for indirect table\n");
+                       if (!firmware_has_feature(FW_FEATURE_CMO))
+                               sdev_printk(KERN_ERR, cmd->device,
+                                           "Can't allocate memory "
+                                           "for indirect table\n");
+                       scsi_dma_unmap(cmd);
                        return 0;
-                       
                }
        }
 
-       total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);        
+       total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
 
        indirect->len = total_length;
        indirect->table_desc.va = evt_struct->ext_list_token;
        indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
        memcpy(indirect->desc_list, evt_struct->ext_list,
               MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
-       
        return 1;
 }
 
 /**
- * map_single_data: - Maps memory and initializes memory decriptor fields
- * @cmd:       struct scsi_cmnd with the memory to be mapped
- * @srp_cmd:   srp_cmd that contains the memory descriptor
- * @dev:       device for which to map dma memory
- *
- * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
- * Returns 1 on success.
-*/
-static int map_single_data(struct scsi_cmnd *cmd,
-                          struct srp_cmd *srp_cmd, struct device *dev)
-{
-       struct srp_direct_buf *data =
-               (struct srp_direct_buf *) srp_cmd->add_data;
-
-       data->va =
-               dma_map_single(dev, cmd->request_buffer,
-                              cmd->request_bufflen,
-                              DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(data->va)) {
-               printk(KERN_ERR
-                      "ibmvscsi: Unable to map request_buffer for command!\n");
-               return 0;
-       }
-       data->len = cmd->request_bufflen;
-       data->key = 0;
-
-       set_srp_direction(cmd, srp_cmd, 1);
-
-       return 1;
-}
-
-/**
  * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
  * @cmd:       struct scsi_cmnd with the memory to be mapped
  * @srp_cmd:   srp_cmd that contains the memory descriptor
@@ -503,23 +479,83 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
        case DMA_NONE:
                return 1;
        case DMA_BIDIRECTIONAL:
-               printk(KERN_ERR
-                      "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
+               sdev_printk(KERN_ERR, cmd->device,
+                           "Can't map DMA_BIDIRECTIONAL to read/write\n");
                return 0;
        default:
-               printk(KERN_ERR
-                      "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
-                      cmd->sc_data_direction);
+               sdev_printk(KERN_ERR, cmd->device,
+                           "Unknown data direction 0x%02x; can't map!\n",
+                           cmd->sc_data_direction);
                return 0;
        }
 
-       if (!cmd->request_buffer)
-               return 1;
-       if (cmd->use_sg)
-               return map_sg_data(cmd, evt_struct, srp_cmd, dev);
-       return map_single_data(cmd, srp_cmd, dev);
+       return map_sg_data(cmd, evt_struct, srp_cmd, dev);
+}
+
+/**
+ * purge_requests: Our virtual adapter just shut down.  purge any sent requests
+ * @hostdata:    the adapter
+ */
+static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+{
+       struct srp_event_struct *tmp_evt, *pos;
+       unsigned long flags;
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
+               list_del(&tmp_evt->list);
+               del_timer(&tmp_evt->timer);
+               if (tmp_evt->cmnd) {
+                       tmp_evt->cmnd->result = (error_code << 16);
+                       unmap_cmd_data(&tmp_evt->iu.srp.cmd,
+                                      tmp_evt,
+                                      tmp_evt->hostdata->dev);
+                       if (tmp_evt->cmnd_done)
+                               tmp_evt->cmnd_done(tmp_evt->cmnd);
+               } else if (tmp_evt->done)
+                       tmp_evt->done(tmp_evt);
+               free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
+       }
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+}
+
+/**
+ * ibmvscsi_reset_host - Reset the connection to the server
+ * @hostdata:  struct ibmvscsi_host_data to reset
+*/
+static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
+{
+       scsi_block_requests(hostdata->host);
+       atomic_set(&hostdata->request_limit, 0);
+
+       purge_requests(hostdata, DID_ERROR);
+       if ((ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata)) ||
+           (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0)) ||
+           (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
+               atomic_set(&hostdata->request_limit, -1);
+               dev_err(hostdata->dev, "error after reset\n");
+       }
+
+       scsi_unblock_requests(hostdata->host);
 }
 
+/**
+ * ibmvscsi_timeout - Internal command timeout handler
+ * @evt_struct:        struct srp_event_struct that timed out
+ *
+ * Called when an internally generated command times out
+*/
+static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
+{
+       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+       dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
+               evt_struct->iu.srp.cmd.opcode);
+
+       ibmvscsi_reset_host(hostdata);
+}
+
+
 /* ------------------------------------------------------------
  * Routines for sending and receiving SRPs
  */
@@ -527,15 +563,17 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
  * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
  * @evt_struct:        evt_struct to be sent
  * @hostdata:  ibmvscsi_host_data of host
+ * @timeout:   timeout in seconds - 0 means do not time command
  *
  * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
  * Note that this routine assumes that host_lock is held for synchronization
 */
 static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
-                                  struct ibmvscsi_host_data *hostdata)
+                                  struct ibmvscsi_host_data *hostdata,
+                                  unsigned long timeout)
 {
        u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
-       int request_status;
+       int request_status = 0;
        int rc;
 
        /* If we have exhausted our request limit, just fail this request,
@@ -553,6 +591,13 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                if (request_status < -1)
                        goto send_error;
                /* Otherwise, we may have run out of requests. */
+               /* If request limit was 0 when we started, the adapter is in the
+                * process of performing a login with the server adapter, or
+                * we may have run out of requests.
+                */
+               else if (request_status == -1 &&
+                        evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
+                       goto send_busy;
                /* Abort and reset calls should make it through.
                 * Nothing except abort and reset should use the last two
                 * slots unless we had two or less to begin with.
@@ -588,12 +633,30 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
         */
        list_add_tail(&evt_struct->list, &hostdata->sent);
 
+       init_timer(&evt_struct->timer);
+       if (timeout) {
+               evt_struct->timer.data = (unsigned long) evt_struct;
+               evt_struct->timer.expires = jiffies + (timeout * HZ);
+               evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
+               add_timer(&evt_struct->timer);
+       }
+
        if ((rc =
-            ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+            ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
                list_del(&evt_struct->list);
+               del_timer(&evt_struct->timer);
 
-               printk(KERN_ERR "ibmvscsi: send error %d\n",
-                      rc);
+               /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
+                * Firmware will send a CRQ with a transport event (0xFF) to
+                * tell this client what has happened to the transport.  This
+                * will be handled in ibmvscsi_handle_crq()
+                */
+               if (rc == H_CLOSED) {
+                       dev_warn(hostdata->dev, "send warning. "
+                                "Receive queue closed, will retry.\n");
+                       goto send_busy;
+               }
+               dev_err(hostdata->dev, "send error %d\n", rc);
                atomic_inc(&hostdata->request_limit);
                goto send_error;
        }
@@ -604,7 +667,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
        free_event_struct(&hostdata->pool, evt_struct);
-       atomic_inc(&hostdata->request_limit);
+       if (request_status != -1)
+               atomic_inc(&hostdata->request_limit);
        return SCSI_MLQUEUE_HOST_BUSY;
 
  send_error:
@@ -634,13 +698,12 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 
        if (unlikely(rsp->opcode != SRP_RSP)) {
                if (printk_ratelimit())
-                       printk(KERN_WARNING 
-                              "ibmvscsi: bad SRP RSP type %d\n",
-                              rsp->opcode);
+                       dev_warn(evt_struct->hostdata->dev,
+                                "bad SRP RSP type %d\n", rsp->opcode);
        }
        
        if (cmnd) {
-               cmnd->result = rsp->status;
+               cmnd->result |= rsp->status;
                if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
                        memcpy(cmnd->sense_buffer,
                               rsp->data,
@@ -650,9 +713,9 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
                               evt_struct->hostdata->dev);
 
                if (rsp->flags & SRP_RSP_FLAG_DOOVER)
-                       cmnd->resid = rsp->data_out_res_cnt;
+                       scsi_set_resid(cmnd, rsp->data_out_res_cnt);
                else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
-                       cmnd->resid = rsp->data_in_res_cnt;
+                       scsi_set_resid(cmnd, rsp->data_in_res_cnt);
        }
 
        if (evt_struct->cmnd_done)
@@ -680,11 +743,11 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
        struct srp_cmd *srp_cmd;
        struct srp_event_struct *evt_struct;
        struct srp_indirect_buf *indirect;
-       struct ibmvscsi_host_data *hostdata =
-               (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
+       struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
        u16 lun = lun_from_dev(cmnd->device);
        u8 out_fmt, in_fmt;
 
+       cmnd->result = (DID_OK << 16);
        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct)
                return SCSI_MLQUEUE_HOST_BUSY;
@@ -693,11 +756,13 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
        srp_cmd = &evt_struct->iu.srp.cmd;
        memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
        srp_cmd->opcode = SRP_CMD;
-       memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
+       memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
        srp_cmd->lun = ((u64) lun) << 48;
 
        if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
-               printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
+               if (!firmware_has_feature(FW_FEATURE_CMO))
+                       sdev_printk(KERN_ERR, cmnd->device,
+                                   "couldn't convert cmd to srp_cmd\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
@@ -705,7 +770,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
        init_event_struct(evt_struct,
                          handle_cmd_rsp,
                          VIOSRP_SRP_FORMAT,
-                         cmnd->timeout_per_command/HZ);
+                         cmnd->request->timeout/HZ);
 
        evt_struct->cmnd = cmnd;
        evt_struct->cmnd_done = done;
@@ -722,108 +787,59 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
                        offsetof(struct srp_indirect_buf, desc_list);
        }
 
-       return ibmvscsi_send_srp_event(evt_struct, hostdata);
+       return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
 }
 
 /* ------------------------------------------------------------
  * Routines for driver initialization
  */
+
 /**
- * adapter_info_rsp: - Handle response to MAD adapter info request
- * @evt_struct:        srp_event_struct with the response
+ * map_persist_bufs: - Pre-map persistent data for adapter logins
+ * @hostdata:   ibmvscsi_host_data of host
  *
- * Used as a "done" callback by when sending adapter_info. Gets called
- * by ibmvscsi_handle_crq()
-*/
-static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+ * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
+ * Return 1 on error, 0 on success.
+ */
+static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
 {
-       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-       dma_unmap_single(hostdata->dev,
-                        evt_struct->iu.mad.adapter_info.buffer,
-                        evt_struct->iu.mad.adapter_info.common.length,
-                        DMA_BIDIRECTIONAL);
 
-       if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
-               printk("ibmvscsi: error %d getting adapter info\n",
-                      evt_struct->xfer_iu->mad.adapter_info.common.status);
-       } else {
-               printk("ibmvscsi: host srp version: %s, "
-                      "host partition %s (%d), OS %d, max io %u\n",
-                      hostdata->madapter_info.srp_version,
-                      hostdata->madapter_info.partition_name,
-                      hostdata->madapter_info.partition_number,
-                      hostdata->madapter_info.os_type,
-                      hostdata->madapter_info.port_max_txu[0]);
-               
-               if (hostdata->madapter_info.port_max_txu[0]) 
-                       hostdata->host->max_sectors = 
-                               hostdata->madapter_info.port_max_txu[0] >> 9;
-               
-               if (hostdata->madapter_info.os_type == 3 &&
-                   strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
-                       printk("ibmvscsi: host (Ver. %s) doesn't support large"
-                              "transfers\n",
-                              hostdata->madapter_info.srp_version);
-                       printk("ibmvscsi: limiting scatterlists to %d\n",
-                              MAX_INDIRECT_BUFS);
-                       hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
-               }
+       hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
+                                            sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+
+       if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
+               dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
+               return 1;
        }
+
+       hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
+                                                    &hostdata->madapter_info,
+                                                    sizeof(hostdata->madapter_info),
+                                                    DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
+               dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
+               dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+                                sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+               return 1;
+       }
+
+       return 0;
 }
 
 /**
- * send_mad_adapter_info: - Sends the mad adapter info request
- *      and stores the result so it can be retrieved with
- *      sysfs.  We COULD consider causing a failure if the
- *      returned SRP version doesn't match ours.
- * @hostdata:  ibmvscsi_host_data of host
- * 
- * Returns zero if successful.
-*/
-static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+ * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
+ * @hostdata:   ibmvscsi_host_data of host
+ *
+ * Unmap the capabilities and adapter info DMA buffers
+ */
+static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
 {
-       struct viosrp_adapter_info *req;
-       struct srp_event_struct *evt_struct;
-       dma_addr_t addr;
+       dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+                        sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
 
-       evt_struct = get_event_struct(&hostdata->pool);
-       if (!evt_struct) {
-               printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
-                      "for ADAPTER_INFO_REQ!\n");
-               return;
-       }
-
-       init_event_struct(evt_struct,
-                         adapter_info_rsp,
-                         VIOSRP_MAD_FORMAT,
-                         init_timeout * HZ);
-       
-       req = &evt_struct->iu.mad.adapter_info;
-       memset(req, 0x00, sizeof(*req));
-       
-       req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
-       req->common.length = sizeof(hostdata->madapter_info);
-       req->buffer = addr = dma_map_single(hostdata->dev,
-                                           &hostdata->madapter_info,
-                                           sizeof(hostdata->madapter_info),
-                                           DMA_BIDIRECTIONAL);
-
-       if (dma_mapping_error(req->buffer)) {
-               printk(KERN_ERR
-                      "ibmvscsi: Unable to map request_buffer "
-                      "for adapter_info!\n");
-               free_event_struct(&hostdata->pool, evt_struct);
-               return;
-       }
-       
-       if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
-               printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
-               dma_unmap_single(hostdata->dev,
-                                addr,
-                                sizeof(hostdata->madapter_info),
-                                DMA_BIDIRECTIONAL);
-       }
-};
+       dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
+                        sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
+}
 
 /**
  * login_rsp: - Handle response to SRP login request
@@ -839,24 +855,21 @@ static void login_rsp(struct srp_event_struct *evt_struct)
        case SRP_LOGIN_RSP:     /* it worked! */
                break;
        case SRP_LOGIN_REJ:     /* refused! */
-               printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
-                      evt_struct->xfer_iu->srp.login_rej.reason);
+               dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
+                        evt_struct->xfer_iu->srp.login_rej.reason);
                /* Login failed.  */
                atomic_set(&hostdata->request_limit, -1);
                return;
        default:
-               printk(KERN_ERR
-                      "ibmvscsi: Invalid login response typecode 0x%02x!\n",
-                      evt_struct->xfer_iu->srp.login_rsp.opcode);
+               dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
+                       evt_struct->xfer_iu->srp.login_rsp.opcode);
                /* Login failed.  */
                atomic_set(&hostdata->request_limit, -1);
                return;
        }
 
-       printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
-
-       if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
-               printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");
+       dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
+       hostdata->client_migrated = 0;
 
        /* Now we know what the real request-limit is.
         * This value is set rather than added to request_limit because
@@ -867,15 +880,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 
        /* If we had any pending I/Os, kick them */
        scsi_unblock_requests(hostdata->host);
-
-       send_mad_adapter_info(hostdata);
-       return;
 }
 
 /**
  * send_srp_login: - Sends the srp login
  * @hostdata:  ibmvscsi_host_data of host
- * 
+ *
  * Returns zero if successful.
 */
 static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@@ -884,36 +894,264 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
        unsigned long flags;
        struct srp_login_req *login;
        struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
-       if (!evt_struct) {
-               printk(KERN_ERR
-                      "ibmvscsi: couldn't allocate an event for login req!\n");
-               return FAILED;
-       }
 
-       init_event_struct(evt_struct,
-                         login_rsp,
-                         VIOSRP_SRP_FORMAT,
-                         init_timeout * HZ);
+       BUG_ON(!evt_struct);
+       init_event_struct(evt_struct, login_rsp,
+                         VIOSRP_SRP_FORMAT, login_timeout);
 
        login = &evt_struct->iu.srp.login_req;
-       memset(login, 0x00, sizeof(struct srp_login_req));
+       memset(login, 0, sizeof(*login));
        login->opcode = SRP_LOGIN_REQ;
        login->req_it_iu_len = sizeof(union srp_iu);
        login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
-       
+
        spin_lock_irqsave(hostdata->host->host_lock, flags);
-       /* Start out with a request limit of 1, since this is negotiated in
-        * the login request we are just sending
+       /* Start out with a request limit of 0, since this is negotiated in
+        * the login request we are just sending and login requests always
+        * get sent by the driver regardless of request_limit.
         */
-       atomic_set(&hostdata->request_limit, 1);
+       atomic_set(&hostdata->request_limit, 0);
 
-       rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
+       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-       printk("ibmvscsic: sent SRP login\n");
+       dev_info(hostdata->dev, "sent SRP login\n");
        return rc;
 };
 
 /**
+ * capabilities_rsp: - Handle response to MAD adapter capabilities request
+ * @evt_struct:        srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending the capabilities request.
+ */
+static void capabilities_rsp(struct srp_event_struct *evt_struct)
+{
+       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+       if (evt_struct->xfer_iu->mad.capabilities.common.status) {
+               dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
+                       evt_struct->xfer_iu->mad.capabilities.common.status);
+       } else {
+               if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+                       dev_info(hostdata->dev, "Partition migration not supported\n");
+
+               if (client_reserve) {
+                       if (hostdata->caps.reserve.common.server_support ==
+                           SERVER_SUPPORTS_CAP)
+                               dev_info(hostdata->dev, "Client reserve enabled\n");
+                       else
+                               dev_info(hostdata->dev, "Client reserve not supported\n");
+               }
+       }
+
+       send_srp_login(hostdata);
+}
+
+/**
+ * send_mad_capabilities: - Sends the mad capabilities request
+ *      and stores the result in the adapter's capabilities buffer
+ * @hostdata:  ibmvscsi_host_data of host
+ */
+static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
+{
+       struct viosrp_capabilities *req;
+       struct srp_event_struct *evt_struct;
+       unsigned long flags;
+       struct device_node *of_node = hostdata->dev->of_node;
+       const char *location;
+
+       evt_struct = get_event_struct(&hostdata->pool);
+       BUG_ON(!evt_struct);
+
+       init_event_struct(evt_struct, capabilities_rsp,
+                         VIOSRP_MAD_FORMAT, info_timeout);
+
+       req = &evt_struct->iu.mad.capabilities;
+       memset(req, 0, sizeof(*req));
+
+       hostdata->caps.flags = CAP_LIST_SUPPORTED;
+       if (hostdata->client_migrated)
+               hostdata->caps.flags |= CLIENT_MIGRATED;
+
+       strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
+               sizeof(hostdata->caps.name));
+       hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
+
+       location = of_get_property(of_node, "ibm,loc-code", NULL);
+       location = location ? location : dev_name(hostdata->dev);
+       strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
+       hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
+
+       req->common.type = VIOSRP_CAPABILITIES_TYPE;
+       req->buffer = hostdata->caps_addr;
+
+       hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
+       hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
+       hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
+       hostdata->caps.migration.ecl = 1;
+
+       if (client_reserve) {
+               hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
+               hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
+               hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
+               hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
+               req->common.length = sizeof(hostdata->caps);
+       } else
+               req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+               dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
+ * fast_fail_rsp: - Handle response to MAD enable fast fail
+ * @evt_struct:        srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending enable fast fail. Gets called
+ * by ibmvscsi_handle_crq()
+ */
+static void fast_fail_rsp(struct srp_event_struct *evt_struct)
+{
+       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+       u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+
+       if (status == VIOSRP_MAD_NOT_SUPPORTED)
+               dev_err(hostdata->dev, "fast_fail not supported in server\n");
+       else if (status == VIOSRP_MAD_FAILED)
+               dev_err(hostdata->dev, "fast_fail request failed\n");
+       else if (status != VIOSRP_MAD_SUCCESS)
+               dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
+
+       send_mad_capabilities(hostdata);
+}
+
+/**
+ * enable_fast_fail - Send the MAD enable fast fail request, if enabled
+ * @hostdata:  ibmvscsi_host_data of host
+ *
+ * Returns zero if successful.
+ */
+static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
+{
+       int rc;
+       unsigned long flags;
+       struct viosrp_fast_fail *fast_fail_mad;
+       struct srp_event_struct *evt_struct;
+
+       if (!fast_fail) {
+               send_mad_capabilities(hostdata);
+               return 0;
+       }
+
+       evt_struct = get_event_struct(&hostdata->pool);
+       BUG_ON(!evt_struct);
+
+       init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
+
+       fast_fail_mad = &evt_struct->iu.mad.fast_fail;
+       memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
+       fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
+       fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+       return rc;
+}
+
+/**
+ * adapter_info_rsp: - Handle response to MAD adapter info request
+ * @evt_struct:        srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending adapter_info. Gets called
+ * by ibmvscsi_handle_crq()
+*/
+static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+{
+       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+       if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
+               dev_err(hostdata->dev, "error %d getting adapter info\n",
+                       evt_struct->xfer_iu->mad.adapter_info.common.status);
+       } else {
+               dev_info(hostdata->dev, "host srp version: %s, "
+                        "host partition %s (%d), OS %d, max io %u\n",
+                        hostdata->madapter_info.srp_version,
+                        hostdata->madapter_info.partition_name,
+                        hostdata->madapter_info.partition_number,
+                        hostdata->madapter_info.os_type,
+                        hostdata->madapter_info.port_max_txu[0]);
+               
+               if (hostdata->madapter_info.port_max_txu[0]) 
+                       hostdata->host->max_sectors = 
+                               hostdata->madapter_info.port_max_txu[0] >> 9;
+               
+               if (hostdata->madapter_info.os_type == 3 &&
+                   strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
+                       dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
+                               hostdata->madapter_info.srp_version);
+                       dev_err(hostdata->dev, "limiting scatterlists to %d\n",
+                               MAX_INDIRECT_BUFS);
+                       hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
+               }
+
+               if (hostdata->madapter_info.os_type == 3) {
+                       enable_fast_fail(hostdata);
+                       return;
+               }
+       }
+
+       send_srp_login(hostdata);
+}
+
+/**
+ * send_mad_adapter_info: - Sends the mad adapter info request
+ *      and stores the result so it can be retrieved with
+ *      sysfs.  We COULD consider causing a failure if the
+ *      returned SRP version doesn't match ours.
+ * @hostdata:  ibmvscsi_host_data of host
+ * 
+ * Returns zero if successful.
+*/
+static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+       struct viosrp_adapter_info *req;
+       struct srp_event_struct *evt_struct;
+       unsigned long flags;
+
+       evt_struct = get_event_struct(&hostdata->pool);
+       BUG_ON(!evt_struct);
+
+       init_event_struct(evt_struct,
+                         adapter_info_rsp,
+                         VIOSRP_MAD_FORMAT,
+                         info_timeout);
+       
+       req = &evt_struct->iu.mad.adapter_info;
+       memset(req, 0x00, sizeof(*req));
+       
+       req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
+       req->common.length = sizeof(hostdata->madapter_info);
+       req->buffer = hostdata->adapter_info_addr;
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+               dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
+ * init_adapter: Start virtual adapter initialization sequence
+ *
+ */
+static void init_adapter(struct ibmvscsi_host_data *hostdata)
+{
+       send_mad_adapter_info(hostdata);
+}
+
+/**
  * sync_completion: Signal that a synchronous command has completed
  * Note that after returning from this call, the evt_struct is freed.
  * the caller waiting on this completion shouldn't touch the evt_struct
@@ -934,8 +1172,7 @@ static void sync_completion(struct srp_event_struct *evt_struct)
  */
 static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 {
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
+       struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
        struct srp_tsk_mgmt *tsk_mgmt;
        struct srp_event_struct *evt;
        struct srp_event_struct *tmp_evt, *found_evt;
@@ -943,65 +1180,81 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
        int rsp_rc;
        unsigned long flags;
        u16 lun = lun_from_dev(cmd->device);
+       unsigned long wait_switch = 0;
 
        /* First, find this command in our sent list so we can figure
         * out the correct tag
         */
        spin_lock_irqsave(hostdata->host->host_lock, flags);
-       found_evt = NULL;
-       list_for_each_entry(tmp_evt, &hostdata->sent, list) {
-               if (tmp_evt->cmnd == cmd) {
-                       found_evt = tmp_evt;
-                       break;
+       wait_switch = jiffies + (init_timeout * HZ);
+       do {
+               found_evt = NULL;
+               list_for_each_entry(tmp_evt, &hostdata->sent, list) {
+                       if (tmp_evt->cmnd == cmd) {
+                               found_evt = tmp_evt;
+                               break;
+                       }
                }
-       }
 
-       if (!found_evt) {
-               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-               return FAILED;
-       }
+               if (!found_evt) {
+                       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+                       return SUCCESS;
+               }
 
-       evt = get_event_struct(&hostdata->pool);
-       if (evt == NULL) {
-               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-               printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
-               return FAILED;
-       }
+               evt = get_event_struct(&hostdata->pool);
+               if (evt == NULL) {
+                       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+                       sdev_printk(KERN_ERR, cmd->device,
+                               "failed to allocate abort event\n");
+                       return FAILED;
+               }
        
-       init_event_struct(evt,
-                         sync_completion,
-                         VIOSRP_SRP_FORMAT,
-                         init_timeout * HZ);
+               init_event_struct(evt,
+                                 sync_completion,
+                                 VIOSRP_SRP_FORMAT,
+                                 abort_timeout);
 
-       tsk_mgmt = &evt->iu.srp.tsk_mgmt;
+               tsk_mgmt = &evt->iu.srp.tsk_mgmt;
        
-       /* Set up an abort SRP command */
-       memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-       tsk_mgmt->opcode = SRP_TSK_MGMT;
-       tsk_mgmt->lun = ((u64) lun) << 48;
-       tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
-       tsk_mgmt->task_tag = (u64) found_evt;
-
-       printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
-              tsk_mgmt->lun, tsk_mgmt->task_tag);
-
-       evt->sync_srp = &srp_rsp;
-       init_completion(&evt->comp);
-       rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+               /* Set up an abort SRP command */
+               memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
+               tsk_mgmt->opcode = SRP_TSK_MGMT;
+               tsk_mgmt->lun = ((u64) lun) << 48;
+               tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
+               tsk_mgmt->task_tag = (u64) found_evt;
+
+               evt->sync_srp = &srp_rsp;
+
+               init_completion(&evt->comp);
+               rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
+
+               if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
+                       break;
+
+               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+               msleep(10);
+               spin_lock_irqsave(hostdata->host->host_lock, flags);
+       } while (time_before(jiffies, wait_switch));
+
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
        if (rsp_rc != 0) {
-               printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
+               sdev_printk(KERN_ERR, cmd->device,
+                           "failed to send abort() event. rc=%d\n", rsp_rc);
                return FAILED;
        }
 
+       sdev_printk(KERN_INFO, cmd->device,
+                    "aborting command. lun 0x%llx, tag 0x%llx\n",
+                   (((u64) lun) << 48), (u64) found_evt);
+
        wait_for_completion(&evt->comp);
 
        /* make sure we got a good response */
        if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
                if (printk_ratelimit())
-                       printk(KERN_WARNING 
-                              "ibmvscsi: abort bad SRP RSP type %d\n",
-                              srp_rsp.srp.rsp.opcode);
+                       sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
+                                   srp_rsp.srp.rsp.opcode);
                return FAILED;
        }
 
@@ -1012,10 +1265,9 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
        if (rsp_rc) {
                if (printk_ratelimit())
-                       printk(KERN_WARNING 
-                              "ibmvscsi: abort code %d for task tag 0x%lx\n",
-                              rsp_rc,
-                              tsk_mgmt->task_tag);
+                       sdev_printk(KERN_WARNING, cmd->device,
+                                   "abort code %d for task tag 0x%llx\n",
+                                   rsp_rc, tsk_mgmt->task_tag);
                return FAILED;
        }
 
@@ -1034,15 +1286,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
        if (found_evt == NULL) {
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-               printk(KERN_INFO
-                      "ibmvscsi: aborted task tag 0x%lx completed\n",
-                      tsk_mgmt->task_tag);
+               sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
+                           tsk_mgmt->task_tag);
                return SUCCESS;
        }
 
-       printk(KERN_INFO
-              "ibmvscsi: successfully aborted task tag 0x%lx\n",
-              tsk_mgmt->task_tag);
+       sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
+                   tsk_mgmt->task_tag);
 
        cmd->result = (DID_ABORT << 16);
        list_del(&found_evt->list);
@@ -1061,9 +1311,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
  */
 static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
-
+       struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
        struct srp_tsk_mgmt *tsk_mgmt;
        struct srp_event_struct *evt;
        struct srp_event_struct *tmp_evt, *pos;
@@ -1071,48 +1319,63 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
        int rsp_rc;
        unsigned long flags;
        u16 lun = lun_from_dev(cmd->device);
+       unsigned long wait_switch = 0;
 
        spin_lock_irqsave(hostdata->host->host_lock, flags);
-       evt = get_event_struct(&hostdata->pool);
-       if (evt == NULL) {
-               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-               printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
-               return FAILED;
-       }
+       wait_switch = jiffies + (init_timeout * HZ);
+       do {
+               evt = get_event_struct(&hostdata->pool);
+               if (evt == NULL) {
+                       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+                       sdev_printk(KERN_ERR, cmd->device,
+                               "failed to allocate reset event\n");
+                       return FAILED;
+               }
        
-       init_event_struct(evt,
-                         sync_completion,
-                         VIOSRP_SRP_FORMAT,
-                         init_timeout * HZ);
+               init_event_struct(evt,
+                                 sync_completion,
+                                 VIOSRP_SRP_FORMAT,
+                                 reset_timeout);
 
-       tsk_mgmt = &evt->iu.srp.tsk_mgmt;
+               tsk_mgmt = &evt->iu.srp.tsk_mgmt;
 
-       /* Set up a lun reset SRP command */
-       memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-       tsk_mgmt->opcode = SRP_TSK_MGMT;
-       tsk_mgmt->lun = ((u64) lun) << 48;
-       tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
+               /* Set up a lun reset SRP command */
+               memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
+               tsk_mgmt->opcode = SRP_TSK_MGMT;
+               tsk_mgmt->lun = ((u64) lun) << 48;
+               tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
+
+               evt->sync_srp = &srp_rsp;
+
+               init_completion(&evt->comp);
+               rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
+
+               if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
+                       break;
 
-       printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
-              tsk_mgmt->lun);
+               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+               msleep(10);
+               spin_lock_irqsave(hostdata->host->host_lock, flags);
+       } while (time_before(jiffies, wait_switch));
 
-       evt->sync_srp = &srp_rsp;
-       init_completion(&evt->comp);
-       rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
        if (rsp_rc != 0) {
-               printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
+               sdev_printk(KERN_ERR, cmd->device,
+                           "failed to send reset event. rc=%d\n", rsp_rc);
                return FAILED;
        }
 
+       sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
+                   (((u64) lun) << 48));
+
        wait_for_completion(&evt->comp);
 
        /* make sure we got a good response */
        if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
                if (printk_ratelimit())
-                       printk(KERN_WARNING 
-                              "ibmvscsi: reset bad SRP RSP type %d\n",
-                              srp_rsp.srp.rsp.opcode);
+                       sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
+                                   srp_rsp.srp.rsp.opcode);
                return FAILED;
        }
 
@@ -1123,9 +1386,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
        if (rsp_rc) {
                if (printk_ratelimit())
-                       printk(KERN_WARNING 
-                              "ibmvscsi: reset code %d for task tag 0x%lx\n",
-                              rsp_rc, tsk_mgmt->task_tag);
+                       sdev_printk(KERN_WARNING, cmd->device,
+                                   "reset code %d for task tag 0x%llx\n",
+                                   rsp_rc, tsk_mgmt->task_tag);
                return FAILED;
        }
 
@@ -1154,32 +1417,29 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 }
 
 /**
- * purge_requests: Our virtual adapter just shut down.  purge any sent requests
- * @hostdata:    the adapter
- */
-static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+ * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
+ * @cmd:       struct scsi_cmnd having problems
+*/
+static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-       struct srp_event_struct *tmp_evt, *pos;
-       unsigned long flags;
+       unsigned long wait_switch = 0;
+       struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
 
-       spin_lock_irqsave(hostdata->host->host_lock, flags);
-       list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
-               list_del(&tmp_evt->list);
-               if (tmp_evt->cmnd) {
-                       tmp_evt->cmnd->result = (error_code << 16);
-                       unmap_cmd_data(&tmp_evt->iu.srp.cmd, 
-                                      tmp_evt, 
-                                      tmp_evt->hostdata->dev);
-                       if (tmp_evt->cmnd_done)
-                               tmp_evt->cmnd_done(tmp_evt->cmnd);
-               } else {
-                       if (tmp_evt->done) {
-                               tmp_evt->done(tmp_evt);
-                       }
-               }
-               free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
+       dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
+
+       ibmvscsi_reset_host(hostdata);
+
+       for (wait_switch = jiffies + (init_timeout * HZ);
+            time_before(jiffies, wait_switch) &&
+                    atomic_read(&hostdata->request_limit) < 2;) {
+
+               msleep(10);
        }
-       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
+       if (atomic_read(&hostdata->request_limit) <= 0)
+               return FAILED;
+
+       return SUCCESS;
 }
 
 /**
@@ -1191,6 +1451,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
 void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                         struct ibmvscsi_host_data *hostdata)
 {
+       long rc;
        unsigned long flags;
        struct srp_event_struct *evt_struct =
            (struct srp_event_struct *)crq->IU_data_ptr;
@@ -1198,27 +1459,25 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
        case 0xC0:              /* initialization */
                switch (crq->format) {
                case 0x01:      /* Initialization message */
-                       printk(KERN_INFO "ibmvscsi: partner initialized\n");
+                       dev_info(hostdata->dev, "partner initialized\n");
                        /* Send back a response */
-                       if (ibmvscsi_send_crq(hostdata,
-                                             0xC002000000000000LL, 0) == 0) {
+                       if ((rc = ibmvscsi_ops->send_crq(hostdata,
+                                                        0xC002000000000000LL, 0)) == 0) {
                                /* Now login */
-                               send_srp_login(hostdata);
+                               init_adapter(hostdata);
                        } else {
-                               printk(KERN_ERR
-                                      "ibmvscsi: Unable to send init rsp\n");
+                               dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
                        }
 
                        break;
                case 0x02:      /* Initialization response */
-                       printk(KERN_INFO
-                              "ibmvscsi: partner initialization complete\n");
+                       dev_info(hostdata->dev, "partner initialization complete\n");
 
                        /* Now login */
-                       send_srp_login(hostdata);
+                       init_adapter(hostdata);
                        break;
                default:
-                       printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
+                       dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
                }
                return;
        case 0xFF:      /* Hypervisor telling us the connection is closed */
@@ -1226,33 +1485,29 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                atomic_set(&hostdata->request_limit, 0);
                if (crq->format == 0x06) {
                        /* We need to re-setup the interpartition connection */
-                       printk(KERN_INFO
-                              "ibmvscsi: Re-enabling adapter!\n");
+                       dev_info(hostdata->dev, "Re-enabling adapter!\n");
+                       hostdata->client_migrated = 1;
                        purge_requests(hostdata, DID_REQUEUE);
-                       if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
-                                                       hostdata)) ||
-                           (ibmvscsi_send_crq(hostdata,
-                                              0xC001000000000000LL, 0))) {
+                       if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
+                                                             hostdata)) ||
+                           (ibmvscsi_ops->send_crq(hostdata,
+                                                   0xC001000000000000LL, 0))) {
                                        atomic_set(&hostdata->request_limit,
                                                   -1);
-                                       printk(KERN_ERR
-                                              "ibmvscsi: error after"
-                                              " enable\n");
+                                       dev_err(hostdata->dev, "error after enable\n");
                        }
                } else {
-                       printk(KERN_INFO
-                              "ibmvscsi: Virtual adapter failed rc %d!\n",
-                              crq->format);
+                       dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
+                               crq->format);
 
                        purge_requests(hostdata, DID_ERROR);
-                       if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
-                                                       hostdata)) ||
-                           (ibmvscsi_send_crq(hostdata,
-                                              0xC001000000000000LL, 0))) {
+                       if ((ibmvscsi_ops->reset_crq_queue(&hostdata->queue,
+                                                          hostdata)) ||
+                           (ibmvscsi_ops->send_crq(hostdata,
+                                                   0xC001000000000000LL, 0))) {
                                        atomic_set(&hostdata->request_limit,
                                                   -1);
-                                       printk(KERN_ERR
-                                              "ibmvscsi: error after reset\n");
+                                       dev_err(hostdata->dev, "error after reset\n");
                        }
                }
                scsi_unblock_requests(hostdata->host);
@@ -1260,9 +1515,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
        case 0x80:              /* real payload */
                break;
        default:
-               printk(KERN_ERR
-                      "ibmvscsi: got an invalid message type 0x%02x\n",
-                      crq->valid);
+               dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
+                       crq->valid);
                return;
        }
 
@@ -1271,16 +1525,14 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
         * actually sent
         */
        if (!valid_event_struct(&hostdata->pool, evt_struct)) {
-               printk(KERN_ERR
-                      "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
+               dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
                       (void *)crq->IU_data_ptr);
                return;
        }
 
        if (atomic_read(&evt_struct->free)) {
-               printk(KERN_ERR
-                      "ibmvscsi: received duplicate  correlation_token 0x%p!\n",
-                      (void *)crq->IU_data_ptr);
+               dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
+                       (void *)crq->IU_data_ptr);
                return;
        }
 
@@ -1288,11 +1540,14 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
                           &hostdata->request_limit);
 
+       del_timer(&evt_struct->timer);
+
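+       /* flag commands that completed with a bad CRQ status as errors */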
+       if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
+               evt_struct->cmnd->result = DID_ERROR << 16;
        if (evt_struct->done)
                evt_struct->done(evt_struct);
        else
-               printk(KERN_ERR
-                      "ibmvscsi: returned done() is NULL; not running it!\n");
+               dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
 
        /*
         * Lock the host_lock before messing with these structures, since we
@@ -1313,20 +1568,20 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 {
        struct viosrp_host_config *host_config;
        struct srp_event_struct *evt_struct;
+       unsigned long flags;
        dma_addr_t addr;
        int rc;
 
        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct) {
-               printk(KERN_ERR
-                      "ibmvscsi: could't allocate event for HOST_CONFIG!\n");
+               dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
                return -1;
        }
 
        init_event_struct(evt_struct,
                          sync_completion,
                          VIOSRP_MAD_FORMAT,
-                         init_timeout * HZ);
+                         info_timeout);
 
        host_config = &evt_struct->iu.mad.host_config;
 
@@ -1338,15 +1593,18 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
                                                    length,
                                                    DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(host_config->buffer)) {
-               printk(KERN_ERR
-                      "ibmvscsi: dma_mapping error " "getting host config\n");
+       if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
+               if (!firmware_has_feature(FW_FEATURE_CMO))
+                       dev_err(hostdata->dev,
+                               "dma_mapping error getting host config\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return -1;
        }
 
        init_completion(&evt_struct->comp);
-       rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
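+       /* hold the host lock: ibmvscsi_send_srp_event() updates the sent list */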
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        if (rc == 0)
                wait_for_completion(&evt_struct->comp);
        dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
@@ -1368,8 +1626,10 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
        unsigned long lock_flags = 0;
 
        spin_lock_irqsave(shost->host_lock, lock_flags);
-       if (sdev->type == TYPE_DISK)
+       if (sdev->type == TYPE_DISK) {
                sdev->allow_restart = 1;
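+               /* use a 120 second command timeout for virtual disks */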
+               blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+       }
        scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
        spin_unlock_irqrestore(shost->host_lock, lock_flags);
        return 0;
@@ -1379,12 +1639,17 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
  * ibmvscsi_change_queue_depth - Change the device's queue depth
  * @sdev:      scsi device struct
  * @qdepth:    depth to set
+ * @reason:    calling context
  *
  * Return value:
  *     actual depth set
  **/
-static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth,
+                                      int reason)
 {
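+       /* only plain (SCSI_QDEPTH_DEFAULT) queue depth changes are supported */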
+       if (reason != SCSI_QDEPTH_DEFAULT)
+               return -EOPNOTSUPP;
+
        if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
                qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
 
@@ -1395,11 +1660,51 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
 /* ------------------------------------------------------------
  * sysfs attributes
  */
-static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
+static ssize_t show_host_vhost_loc(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+       int len;
+
+       len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
+                      hostdata->caps.loc);
+       return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_loc = {
+       .attr = {
+                .name = "vhost_loc",
+                .mode = S_IRUGO,
+                },
+       .show = show_host_vhost_loc,
+};
+
+static ssize_t show_host_vhost_name(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+       int len;
+
+       len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
+                      hostdata->caps.name);
+       return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_name = {
+       .attr = {
+                .name = "vhost_name",
+                .mode = S_IRUGO,
+                },
+       .show = show_host_vhost_name,
+};
+
+static ssize_t show_host_srp_version(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
        int len;
 
        len = snprintf(buf, PAGE_SIZE, "%s\n",
@@ -1407,7 +1712,7 @@ static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
        return len;
 }
 
-static struct class_device_attribute ibmvscsi_host_srp_version = {
+static struct device_attribute ibmvscsi_host_srp_version = {
        .attr = {
                 .name = "srp_version",
                 .mode = S_IRUGO,
@@ -1415,12 +1720,12 @@ static struct class_device_attribute ibmvscsi_host_srp_version = {
        .show = show_host_srp_version,
 };
 
-static ssize_t show_host_partition_name(struct class_device *class_dev,
+static ssize_t show_host_partition_name(struct device *dev,
+                                       struct device_attribute *attr,
                                        char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
        int len;
 
        len = snprintf(buf, PAGE_SIZE, "%s\n",
@@ -1428,7 +1733,7 @@ static ssize_t show_host_partition_name(struct class_device *class_dev,
        return len;
 }
 
-static struct class_device_attribute ibmvscsi_host_partition_name = {
+static struct device_attribute ibmvscsi_host_partition_name = {
        .attr = {
                 .name = "partition_name",
                 .mode = S_IRUGO,
@@ -1436,12 +1741,12 @@ static struct class_device_attribute ibmvscsi_host_partition_name = {
        .show = show_host_partition_name,
 };
 
-static ssize_t show_host_partition_number(struct class_device *class_dev,
+static ssize_t show_host_partition_number(struct device *dev,
+                                         struct device_attribute *attr,
                                          char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
        int len;
 
        len = snprintf(buf, PAGE_SIZE, "%d\n",
@@ -1449,7 +1754,7 @@ static ssize_t show_host_partition_number(struct class_device *class_dev,
        return len;
 }
 
-static struct class_device_attribute ibmvscsi_host_partition_number = {
+static struct device_attribute ibmvscsi_host_partition_number = {
        .attr = {
                 .name = "partition_number",
                 .mode = S_IRUGO,
@@ -1457,11 +1762,11 @@ static struct class_device_attribute ibmvscsi_host_partition_number = {
        .show = show_host_partition_number,
 };
 
-static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
+static ssize_t show_host_mad_version(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
        int len;
 
        len = snprintf(buf, PAGE_SIZE, "%d\n",
@@ -1469,7 +1774,7 @@ static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
        return len;
 }
 
-static struct class_device_attribute ibmvscsi_host_mad_version = {
+static struct device_attribute ibmvscsi_host_mad_version = {
        .attr = {
                 .name = "mad_version",
                 .mode = S_IRUGO,
@@ -1477,18 +1782,18 @@ static struct class_device_attribute ibmvscsi_host_mad_version = {
        .show = show_host_mad_version,
 };
 
-static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
+static ssize_t show_host_os_type(struct device *dev,
+                                struct device_attribute *attr, char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
        int len;
 
        len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
        return len;
 }
 
-static struct class_device_attribute ibmvscsi_host_os_type = {
+static struct device_attribute ibmvscsi_host_os_type = {
        .attr = {
                 .name = "os_type",
                 .mode = S_IRUGO,
@@ -1496,11 +1801,11 @@ static struct class_device_attribute ibmvscsi_host_os_type = {
        .show = show_host_os_type,
 };
 
-static ssize_t show_host_config(struct class_device *class_dev, char *buf)
+static ssize_t show_host_config(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(class_dev);
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)shost->hostdata;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
 
        /* returns null-terminated host config data */
        if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
@@ -1509,7 +1814,7 @@ static ssize_t show_host_config(struct class_device *class_dev, char *buf)
                return 0;
 }
 
-static struct class_device_attribute ibmvscsi_host_config = {
+static struct device_attribute ibmvscsi_host_config = {
        .attr = {
                 .name = "config",
                 .mode = S_IRUGO,
@@ -1517,7 +1822,9 @@ static struct class_device_attribute ibmvscsi_host_config = {
        .show = show_host_config,
 };
 
-static struct class_device_attribute *ibmvscsi_attrs[] = {
+static struct device_attribute *ibmvscsi_attrs[] = {
+       &ibmvscsi_host_vhost_loc,
+       &ibmvscsi_host_vhost_name,
        &ibmvscsi_host_srp_version,
        &ibmvscsi_host_partition_name,
        &ibmvscsi_host_partition_number,
@@ -1537,9 +1844,10 @@ static struct scsi_host_template driver_template = {
        .queuecommand = ibmvscsi_queuecommand,
        .eh_abort_handler = ibmvscsi_eh_abort_handler,
        .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
+       .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
        .slave_configure = ibmvscsi_slave_configure,
        .change_queue_depth = ibmvscsi_change_queue_depth,
-       .cmd_per_lun = 16,
+       .cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
        .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
        .this_id = -1,
        .sg_tablesize = SG_ALL,
@@ -1548,6 +1856,26 @@ static struct scsi_host_template driver_template = {
 };
 
 /**
+ * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ *     Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
+{
+       /* iu_storage data allocated in initialize_event_pool */
+       unsigned long desired_io = max_events * sizeof(union viosrp_iu);
+
+       /* add io space for sg data */
+       desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
+                            IBMVSCSI_CMDS_PER_LUN_DEFAULT);
+
+       return desired_io;
+}
+
+/**
  * Called by bus code for each adapter
  */
 static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -1555,48 +1883,64 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        struct ibmvscsi_host_data *hostdata;
        struct Scsi_Host *host;
        struct device *dev = &vdev->dev;
+       struct srp_rport_identifiers ids;
+       struct srp_rport *rport;
        unsigned long wait_switch = 0;
        int rc;
 
-       vdev->dev.driver_data = NULL;
+       dev_set_drvdata(&vdev->dev, NULL);
 
-       driver_template.can_queue = max_requests;
        host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
        if (!host) {
-               printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
+               dev_err(&vdev->dev, "couldn't allocate host data\n");
                goto scsi_host_alloc_failed;
        }
 
-       hostdata = (struct ibmvscsi_host_data *)host->hostdata;
+       host->transportt = ibmvscsi_transport_template;
+       hostdata = shost_priv(host);
        memset(hostdata, 0x00, sizeof(*hostdata));
        INIT_LIST_HEAD(&hostdata->sent);
        hostdata->host = host;
        hostdata->dev = dev;
        atomic_set(&hostdata->request_limit, -1);
-       hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
+       hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
 
-       rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
+       if (map_persist_bufs(hostdata)) {
+               dev_err(&vdev->dev, "couldn't map persistent buffers\n");
+               goto persist_bufs_failed;
+       }
+
+       rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
        if (rc != 0 && rc != H_RESOURCE) {
-               printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
+               dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
                goto init_crq_failed;
        }
-       if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
-               printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
+       if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
+               dev_err(&vdev->dev, "couldn't initialize event pool\n");
                goto init_pool_failed;
        }
 
        host->max_lun = 8;
        host->max_id = max_id;
        host->max_channel = max_channel;
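+       /* the SRP command IU carries a 16 byte CDB */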
+       host->max_cmd_len = 16;
 
        if (scsi_add_host(hostdata->host, hostdata->dev))
                goto add_host_failed;
 
+       /* we don't have a proper target_port_id so let's use the fake one */
+       memcpy(ids.port_id, hostdata->madapter_info.partition_name,
+              sizeof(ids.port_id));
+       ids.roles = SRP_RPORT_ROLE_TARGET;
+       rport = srp_rport_add(host, &ids);
+       if (IS_ERR(rport))
+               goto add_srp_port_failed;
+
        /* Try to send an initialization message.  Note that this is allowed
         * to fail if the other end is not active.  In that case we don't
         * want to scan.
         */
-       if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+       if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
            || rc == H_RESOURCE) {
                /*
                 * Wait around max init_timeout secs for the adapter to finish
@@ -1616,14 +1960,18 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                        scsi_scan_host(host);
        }
 
-       vdev->dev.driver_data = hostdata;
+       dev_set_drvdata(&vdev->dev, hostdata);
        return 0;
 
+      add_srp_port_failed:
+       scsi_remove_host(hostdata->host);
       add_host_failed:
        release_event_pool(&hostdata->pool, hostdata);
       init_pool_failed:
-       ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
+       ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
       init_crq_failed:
+       unmap_persist_bufs(hostdata);
+      persist_bufs_failed:
        scsi_host_put(host);
       scsi_host_alloc_failed:
        return -1;
@@ -1631,11 +1979,13 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
-       struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
+       struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
+       unmap_persist_bufs(hostdata);
        release_event_pool(&hostdata->pool, hostdata);
-       ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
-                                  max_requests);
-       
+       ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
+                                       max_events);
+
+       srp_remove_host(hostdata->host);
        scsi_remove_host(hostdata->host);
        scsi_host_put(hostdata->host);
 
@@ -1643,6 +1993,19 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
 }
 
 /**
+ * ibmvscsi_resume: Resume from suspend
+ * @dev:       device struct
+ *
+ * We may have lost an interrupt across suspend/resume, so kick the
+ * interrupt handler
+ */
+static int ibmvscsi_resume(struct device *dev)
+{
+       struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
+       return ibmvscsi_ops->resume(hostdata);
+}
+
+/**
  * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 
  * support.
  */
@@ -1652,24 +2015,55 @@ static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
 };
 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
 
+static struct dev_pm_ops ibmvscsi_pm_ops = {
+       .resume = ibmvscsi_resume
+};
+
 static struct vio_driver ibmvscsi_driver = {
        .id_table = ibmvscsi_device_table,
        .probe = ibmvscsi_probe,
        .remove = ibmvscsi_remove,
+       .get_desired_dma = ibmvscsi_get_desired_dma,
        .driver = {
                .name = "ibmvscsi",
                .owner = THIS_MODULE,
+               .pm = &ibmvscsi_pm_ops,
        }
 };
 
+static struct srp_function_template ibmvscsi_transport_functions = {
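+       /* no transport callbacks are needed; only rport registration is used */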
+};
+
 int __init ibmvscsi_module_init(void)
 {
-       return vio_register_driver(&ibmvscsi_driver);
+       int ret;
+
+       /* Ensure we have two requests to do error recovery */
+       driver_template.can_queue = max_requests;
+       max_events = max_requests + 2;
+
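+       /* select the CRQ backend matching the platform firmware */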
+       if (firmware_has_feature(FW_FEATURE_ISERIES))
+               ibmvscsi_ops = &iseriesvscsi_ops;
+       else if (firmware_has_feature(FW_FEATURE_VIO))
+               ibmvscsi_ops = &rpavscsi_ops;
+       else
+               return -ENODEV;
+
+       ibmvscsi_transport_template =
+               srp_attach_transport(&ibmvscsi_transport_functions);
+       if (!ibmvscsi_transport_template)
+               return -ENOMEM;
+
+       ret = vio_register_driver(&ibmvscsi_driver);
+       if (ret)
+               srp_release_transport(ibmvscsi_transport_template);
+       return ret;
 }
 
 void __exit ibmvscsi_module_exit(void)
 {
        vio_unregister_driver(&ibmvscsi_driver);
+       srp_release_transport(ibmvscsi_transport_template);
 }
 
 module_init(ibmvscsi_module_init);