diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2f84f26..8b80e59 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -98,7 +98,7 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
 
 /* This table describes the differences between DMA controller chips */
 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
-       { /* Gemstone and Citrine */
+       { /* Gemstone, Citrine, and Obsidian */
                .mailbox = 0x0042C,
                .cache_line_size = 0x20,
                {
@@ -133,6 +133,8 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
 };
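
For context, the probe path resolves a newly matched PCI ID to one of these ipr_chip_cfg entries, either through the pci_device_id driver_data (as the ID table additions further down in this patch do) or by scanning ipr_chip[]. Below is a minimal sketch of that kind of lookup, assuming the ipr_chip_t field names vendor/device/cfg implied by the initializers above; the helper name and exact flow are illustrative, not necessarily the driver's code:

static const struct ipr_chip_cfg_t *
ipr_lookup_chip_cfg(const struct pci_device_id *dev_id)
{
        int i;

        /* The PCI ID table may carry the cfg pointer directly in driver_data. */
        if (dev_id->driver_data)
                return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

        /* Otherwise fall back to scanning the chip table by vendor/device ID. */
        for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
                if (ipr_chip[i].vendor == dev_id->vendor &&
                    ipr_chip[i].device == dev_id->device)
                        return ipr_chip[i].cfg;

        return NULL;    /* unknown chipset */
}

With a lookup like this in place, supporting the Obsidian parts only requires the table entries added above.
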
@@ -162,29 +164,6 @@ MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when init
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IPR_DRIVER_VERSION);
 
-static const char *ipr_gpdd_dev_end_states[] = {
-       "Command complete",
-       "Terminated by host",
-       "Terminated by device reset",
-       "Terminated by bus reset",
-       "Unknown",
-       "Command not started"
-};
-
-static const char *ipr_gpdd_dev_bus_phases[] = {
-       "Bus free",
-       "Arbitration",
-       "Selection",
-       "Message out",
-       "Command",
-       "Message in",
-       "Data out",
-       "Data in",
-       "Status",
-       "Reselection",
-       "Unknown"
-};
-
 /*  A constant array of IOASCs/URCs/Error Messages */
 static const
 struct ipr_error_table_t ipr_error_table[] = {
@@ -867,8 +846,8 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
 
        if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
                if (res->sdev) {
-                       res->sdev->hostdata = NULL;
                        res->del_from_ml = 1;
+                       res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
                        if (ioa_cfg->allow_ml_add_del)
                                schedule_work(&ioa_cfg->work_q);
                } else
@@ -1354,8 +1333,8 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
                return;
 
        if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
-               ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
-                           "%s\n", ipr_error_table[error_index].error);
+               ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
+                          "%s\n", ipr_error_table[error_index].error);
        } else {
                dev_err(&ioa_cfg->pdev->dev, "%s\n",
                        ipr_error_table[error_index].error);
@@ -2105,7 +2084,6 @@ restart:
                                did_work = 1;
                                sdev = res->sdev;
                                if (!scsi_device_get(sdev)) {
-                                       res->sdev = NULL;
                                        list_move_tail(&res->queue, &ioa_cfg->free_res_q);
                                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                                        scsi_remove_device(sdev);
@@ -2122,6 +2100,7 @@ restart:
                        bus = res->cfgte.res_addr.bus;
                        target = res->cfgte.res_addr.target;
                        lun = res->cfgte.res_addr.lun;
+                       res->add_to_ml = 0;
                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                        scsi_add_device(ioa_cfg->host, bus, target, lun);
                        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -2130,7 +2109,7 @@ restart:
        }
 
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-       kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
+       kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
        LEAVE;
 }
 
@@ -3212,7 +3191,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
                        sdev->timeout = IPR_VSET_RW_TIMEOUT;
                        blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
                }
-               if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
+               if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
                        sdev->allow_restart = 1;
                scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
        }
@@ -3302,6 +3281,44 @@ static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
 }
 
 /**
+ * ipr_device_reset - Reset the device
+ * @ioa_cfg:   ioa config struct
+ * @res:               resource entry struct
+ *
+ * This function issues a device reset to the affected device.
+ * If the device is a SCSI device, a LUN reset will be sent
+ * to the device first. If that does not work, a target reset
+ * will be sent.
+ *
+ * Return value:
+ *     0 on success / non-zero on failure
+ **/
+static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
+                           struct ipr_resource_entry *res)
+{
+       struct ipr_cmnd *ipr_cmd;
+       struct ipr_ioarcb *ioarcb;
+       struct ipr_cmd_pkt *cmd_pkt;
+       u32 ioasc;
+
+       ENTER;
+       ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+       ioarcb = &ipr_cmd->ioarcb;
+       cmd_pkt = &ioarcb->cmd_pkt;
+
+       ioarcb->res_handle = res->cfgte.res_handle;
+       cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+       cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
+
+       ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
+       ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+
+       LEAVE;
+       return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
+}
+
+/**
  * ipr_eh_dev_reset - Reset the device
  * @scsi_cmd:  scsi command struct
  *
@@ -3317,14 +3334,13 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioa_cfg *ioa_cfg;
        struct ipr_resource_entry *res;
-       struct ipr_cmd_pkt *cmd_pkt;
-       u32 ioasc;
+       int rc;
 
        ENTER;
        ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
        res = scsi_cmd->device->hostdata;
 
-       if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
+       if (!res)
                return FAILED;
 
        /*
@@ -3345,25 +3361,12 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
        }
 
        res->resetting_device = 1;
-
-       ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
-
-       ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
-       cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
-       cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
-       cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
-
-       ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
-       ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
-
-       ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
-
+       scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
+       rc = ipr_device_reset(ioa_cfg, res);
        res->resetting_device = 0;
 
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
-
        LEAVE;
-       return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
+       return (rc ? FAILED : SUCCESS);
 }
 
 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
@@ -3438,7 +3441,7 @@ static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
                return;
        }
 
-       ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
+       sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
        reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
        ipr_cmd->sibling = reset_cmd;
        reset_cmd->sibling = ipr_cmd;
@@ -3502,7 +3505,8 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
        cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
        ipr_cmd->u.sdev = scsi_cmd->device;
 
-       ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
+       scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
+                   scsi_cmd->cmnd[0]);
        ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
        ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
 
@@ -3813,8 +3817,8 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
 
        if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
                scsi_cmd->result |= (DID_ERROR << 16);
-               ipr_sdev_err(scsi_cmd->device,
-                            "Request Sense failed with IOASC: 0x%08X\n", ioasc);
+               scmd_printk(KERN_ERR, scsi_cmd,
+                           "Request Sense failed with IOASC: 0x%08X\n", ioasc);
        } else {
                memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
                       SCSI_SENSE_BUFFERSIZE);
@@ -3936,6 +3940,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
  * ipr_dump_ioasa - Dump contents of IOASA
  * @ioa_cfg:   ioa config struct
  * @ipr_cmd:   ipr command struct
+ * @res:               resource entry struct
  *
  * This function is invoked by the interrupt handler when ops
  * fail. It will log the IOASA if appropriate. Only called
@@ -3945,7 +3950,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
  *     none
  **/
 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
-                          struct ipr_cmnd *ipr_cmd)
+                          struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
 {
        int i;
        u16 data_len;
@@ -3973,16 +3978,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
                        return;
        }
 
-       ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
-                    ipr_error_table[error_index].error);
-
-       if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
-           (ioasa->u.gpdd.bus_phase <=  ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
-               ipr_sdev_err(ipr_cmd->scsi_cmd->device,
-                            "Device End state: %s Phase: %s\n",
-                            ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
-                            ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
-       }
+       ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
 
        if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
                data_len = sizeof(struct ipr_ioasa);
@@ -4139,7 +4135,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
        }
 
        if (ipr_is_gscsi(res))
-               ipr_dump_ioasa(ioa_cfg, ipr_cmd);
+               ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
        else
                ipr_gen_sense(ipr_cmd);
 
@@ -4234,35 +4230,6 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
- * ipr_save_ioafp_mode_select - Save adapters mode select data
- * @ioa_cfg:   ioa config struct
- * @scsi_cmd:  scsi command struct
- *
- * This function saves mode select data for the adapter to
- * use following an adapter reset.
- *
- * Return value:
- *     0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
- **/
-static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
-                                      struct scsi_cmnd *scsi_cmd)
-{
-       if (!ioa_cfg->saved_mode_pages) {
-               ioa_cfg->saved_mode_pages  = kmalloc(sizeof(struct ipr_mode_pages),
-                                                    GFP_ATOMIC);
-               if (!ioa_cfg->saved_mode_pages) {
-                       dev_err(&ioa_cfg->pdev->dev,
-                               "IOA mode select buffer allocation failed\n");
-                       return SCSI_MLQUEUE_HOST_BUSY;
-               }
-       }
-
-       memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
-       ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
-       return 0;
-}
-
-/**
  * ipr_queuecommand - Queue a mid-layer request
  * @scsi_cmd:  scsi command struct
  * @done:              done function
@@ -4336,9 +4303,6 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
            (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 
-       if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
-               rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
-
        if (likely(rc == 0))
                rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 
@@ -4570,7 +4534,7 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
        ipr_cmd->job_step = ipr_ioa_reset_done;
 
        list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
-               if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
+               if (!ipr_is_scsi_disk(res))
                        continue;
 
                ipr_cmd->u.res = res;
@@ -4827,17 +4791,11 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
        int length;
 
        ENTER;
-       if (ioa_cfg->saved_mode_pages) {
-               memcpy(mode_pages, ioa_cfg->saved_mode_pages,
-                      ioa_cfg->saved_mode_page_len);
-               length = ioa_cfg->saved_mode_page_len;
-       } else {
-               ipr_scsi_bus_speed_limit(ioa_cfg);
-               ipr_check_term_power(ioa_cfg, mode_pages);
-               ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
-               length = mode_pages->hdr.length + 1;
-               mode_pages->hdr.length = 0;
-       }
+       ipr_scsi_bus_speed_limit(ioa_cfg);
+       ipr_check_term_power(ioa_cfg, mode_pages);
+       ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
+       length = mode_pages->hdr.length + 1;
+       mode_pages->hdr.length = 0;
 
        ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
                              ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
@@ -4882,6 +4840,51 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
 }
 
 /**
+ * ipr_reset_cmd_failed - Handle failure of IOA reset command
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function handles the failure of an IOA bringup command.
+ *
+ * Return value:
+ *     IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+       dev_err(&ioa_cfg->pdev->dev,
+               "0x%02X failed with IOASC: 0x%08X\n",
+               ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
+
+       ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function handles the failure of a Mode Sense to the IOAFP.
+ * Some adapters do not handle all mode pages.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
+{
+       u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+       if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
+               ipr_cmd->job_step = ipr_setup_write_cache;
+               return IPR_RC_JOB_CONTINUE;
+       }
+
+       return ipr_reset_cmd_failed(ipr_cmd);
+}
+
+/**
  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
  * @ipr_cmd:   ipr command struct
  *
@@ -4902,6 +4905,7 @@ static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
                             sizeof(struct ipr_mode_pages));
 
        ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
+       ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
 
        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 
@@ -4970,7 +4974,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
        list_for_each_entry_safe(res, temp, &old_res, queue) {
                if (res->sdev) {
                        res->del_from_ml = 1;
-                       res->sdev->hostdata = NULL;
+                       res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
                        list_move_tail(&res->queue, &ioa_cfg->used_res_q);
                } else {
                        list_move_tail(&res->queue, &ioa_cfg->free_res_q);
@@ -5714,7 +5718,6 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
 {
        u32 rc, ioasc;
-       unsigned long scratch = ipr_cmd->u.scratch;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 
        do {
@@ -5730,17 +5733,13 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
                }
 
                if (IPR_IOASC_SENSE_KEY(ioasc)) {
-                       dev_err(&ioa_cfg->pdev->dev,
-                               "0x%02X failed with IOASC: 0x%08X\n",
-                               ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
-
-                       ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
-                       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
-                       return;
+                       rc = ipr_cmd->job_step_failed(ipr_cmd);
+                       if (rc == IPR_RC_JOB_RETURN)
+                               return;
                }
 
                ipr_reinit_ipr_cmnd(ipr_cmd);
-               ipr_cmd->u.scratch = scratch;
+               ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
                rc = ipr_cmd->job_step(ipr_cmd);
        } while(rc == IPR_RC_JOB_CONTINUE);
 }
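
The hunks above change the adapter bring-up from "any failed step resets the IOA" to a per-step failure hook (job_step_failed), so a step such as the page-28 Mode Sense can fall through to ipr_setup_write_cache on adapters that reject the page instead of forcing a full reset. A self-contained sketch of the pattern, using illustrative names rather than the driver's structures:

enum job_rc { JOB_CONTINUE, JOB_RETURN };

struct job {
        enum job_rc (*step)(struct job *job);        /* next unit of work */
        enum job_rc (*step_failed)(struct job *job); /* error policy for that step */
        int failed;                                  /* set when the last command failed */
};

/* Default policy: give up on the sequence (cf. ipr_reset_cmd_failed above). */
static enum job_rc job_default_failed(struct job *job)
{
        return JOB_RETURN;
}

/* Dispatcher in the spirit of ipr_reset_ioa_job(): on re-entry after a failed
 * command, ask the step-specific handler whether the sequence may continue;
 * otherwise restore the default policy and run the next step. */
static void run_job(struct job *job)
{
        enum job_rc rc;

        do {
                if (job->failed) {
                        rc = job->step_failed(job);
                        if (rc == JOB_RETURN)
                                return;
                        job->failed = 0;
                }
                job->step_failed = job_default_failed;
                rc = job->step(job);
        } while (rc == JOB_CONTINUE);
}

A step that wants a different policy installs its own handler before issuing its command, exactly as ipr_ioafp_mode_sense_page28() does with ipr_reset_mode_sense_failed a few hunks up.
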
@@ -5826,6 +5825,109 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
 }
 
 /**
+ * ipr_reset_freeze - Hold off all I/O activity
+ * @ipr_cmd:   ipr command struct
+ *
+ * Description: If the PCI slot is frozen, hold off all I/O
+ * activity; then, as soon as the slot is available again,
+ * initiate an adapter reset.
+ */
+static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
+{
+       /* Disallow new interrupts, avoid loop */
+       ipr_cmd->ioa_cfg->allow_interrupts = 0;
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+       ipr_cmd->done = ipr_reset_ioa_job;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
+ * @pdev:      PCI device struct
+ *
+ * Description: This routine is called to tell us that the PCI bus
+ * is down. Can't do anything here, except put the device driver
+ * into a holding pattern, waiting for the PCI bus to come back.
+ */
+static void ipr_pci_frozen(struct pci_dev *pdev)
+{
+       unsigned long flags = 0;
+       struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+       _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+}
+
+/**
+ * ipr_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev:      PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
+{
+       unsigned long flags = 0;
+       struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+       _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
+                                        IPR_SHUTDOWN_NONE);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ipr_pci_perm_failure - Called when PCI slot is dead for good.
+ * @pdev:      PCI device struct
+ *
+ * Description: This routine is called when the PCI bus has
+ * permanently failed.
+ */
+static void ipr_pci_perm_failure(struct pci_dev *pdev)
+{
+       unsigned long flags = 0;
+       struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+       if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
+               ioa_cfg->sdt_state = ABORT_DUMP;
+       ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
+       ioa_cfg->in_ioa_bringdown = 1;
+       ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+}
+
+/**
+ * ipr_pci_error_detected - Called when a PCI error is detected.
+ * @pdev:      PCI device struct
+ * @state:     PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return value:
+ *     PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
+                                              pci_channel_state_t state)
+{
+       switch (state) {
+       case pci_channel_io_frozen:
+               ipr_pci_frozen(pdev);
+               return PCI_ERS_RESULT_NEED_RESET;
+       case pci_channel_io_perm_failure:
+               ipr_pci_perm_failure(pdev);
+               return PCI_ERS_RESULT_DISCONNECT;
+               break;
+       default:
+               break;
+       }
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
  * @ioa_cfg:   ioa cfg struct
  *
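
The five functions added above plug the driver into the kernel's PCI error recovery core: the platform blocks I/O to the slot, asks the driver what to do, resets the slot, and then lets the driver bring the adapter back. A rough, illustration-only sketch of how the recovery core exercises these callbacks; recover_slot() is not a real kernel function, and the real core handles more states and coordinates all drivers on the bus:

#include <linux/pci.h>

/* Illustration only: roughly what the platform recovery code does with the
 * handlers added above once a slot error is reported. */
static void recover_slot(struct pci_dev *pdev, struct pci_error_handlers *eh)
{
        pci_ers_result_t rc;

        /* 1. I/O to the slot is blocked; report the frozen channel. */
        rc = eh->error_detected(pdev, pci_channel_io_frozen);
        /*    ipr_pci_error_detected() parks the reset job via ipr_reset_freeze()
         *    and answers PCI_ERS_RESULT_NEED_RESET. */

        if (rc == PCI_ERS_RESULT_NEED_RESET) {
                /* 2. The platform resets the slot, restores config space access,
                 *    and gives the driver a chance to reinitialize. */
                rc = eh->slot_reset(pdev);
                /*    ipr_pci_slot_reset() restarts bring-up from
                 *    ipr_reset_restore_cfg_space and returns PCI_ERS_RESULT_RECOVERED. */
        }

        /* 3. If the slot is gone for good, the core reports
         *    pci_channel_io_perm_failure instead, and ipr_pci_perm_failure()
         *    shuts the adapter down. */
}

The .err_handler wiring at the bottom of the patch is what causes the PCI core to deliver these calls to ipr in the first place.
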
@@ -5844,7 +5946,12 @@ static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
        ENTER;
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
        dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
-       _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
+       if (ioa_cfg->needs_hard_reset) {
+               ioa_cfg->needs_hard_reset = 0;
+               ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+       } else
+               _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
+                                       IPR_SHUTDOWN_NONE);
 
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@@ -5921,7 +6028,6 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
        }
 
        ipr_free_dump(ioa_cfg);
-       kfree(ioa_cfg->saved_mode_pages);
        kfree(ioa_cfg->trace);
 }
 
@@ -6221,6 +6327,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
        unsigned long ipr_regs_pci;
        void __iomem *ipr_regs;
        u32 rc = PCIBIOS_SUCCESSFUL;
+       volatile u32 mask, uproc;
 
        ENTER;
 
@@ -6313,6 +6420,15 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
                goto cleanup_nomem;
        }
 
+       /*
+        * If HRRQ updated interrupt is not masked, or reset alert is set,
+        * the card is in an unknown state and needs a hard reset
+        */
+       mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+       uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
+       if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
+               ioa_cfg->needs_hard_reset = 1;
+
        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
        rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
 
@@ -6554,22 +6670,46 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
              0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
+               0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+             PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
+             0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+             PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
+             0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+             PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
+             0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+             PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
+             0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
                0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
                0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
+       { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
+               0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
        { }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
 
+static struct pci_error_handlers ipr_err_handler = {
+       .error_detected = ipr_pci_error_detected,
+       .slot_reset = ipr_pci_slot_reset,
+};
+
 static struct pci_driver ipr_driver = {
        .name = IPR_NAME,
        .id_table = ipr_pci_table,
        .probe = ipr_probe,
        .remove = ipr_remove,
        .shutdown = ipr_shutdown,
+       .err_handler = &ipr_err_handler,
 };
 
 /**