#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
+#include <linux/hdreg.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
/*
* Global Data
*/
-static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
+static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
+static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
};
static const struct ipr_chip_t ipr_chip[] = {
- { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] }
};
static int ipr_max_bus_speeds [] = {
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
-module_param_named(fastfail, ipr_fastfail, int, 0);
+module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
-module_param_named(debug, ipr_debug, int, 0);
+module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
+module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
+MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
"8009: Impending cache battery pack failure"},
{0x02040400, 0, 0,
"34FF: Disk device format in progress"},
+ {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9070: IOA requested reset"},
{0x023F0000, 0, 0,
"Synchronization required"},
{0x024E0000, 0, 0,
"9076: Configuration error, missing remote IOA"},
{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
"4050: Enclosure does not support a required multipath function"},
+ {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4070: Logically bad block written on device"},
{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
"9041: Array protection temporarily suspended"},
{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
}
/**
- * ipr_unmap_sglist - Unmap scatterlist if mapped
- * @ioa_cfg: ioa config struct
- * @ipr_cmd: ipr command struct
- *
- * Return value:
- * nothing
- **/
-static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
- struct ipr_cmnd *ipr_cmd)
-{
- struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
-
- if (ipr_cmd->dma_use_sg) {
- if (scsi_cmd->use_sg > 0) {
- pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
- scsi_cmd->use_sg,
- scsi_cmd->sc_data_direction);
- } else {
- pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
- scsi_cmd->request_bufflen,
- scsi_cmd->sc_data_direction);
- }
- }
-}
-
-/**
* ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
* @ioa_cfg: ioa config struct
* @clr_ints: interrupts to clear
scsi_cmd->result |= (DID_ERROR << 16);
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
}
/**
+ * strip_and_pad_whitespace - Strip and pad trailing whitespace.
+ * @i: index into buffer
+ * @buf: string to modify
+ *
+ * This function will strip all trailing whitespace, pad the end
+ * of the string with a single space, and NULL terminate the string.
+ *
+ * Return value:
+ * new length of string
+ **/
+static int strip_and_pad_whitespace(int i, char *buf)
+{
+ while (i && buf[i] == ' ')
+ i--;
+ buf[i+1] = ' ';
+ buf[i+2] = '\0';
+ return i + 2;
+}
+
+/**
+ * ipr_log_vpd_compact - Log the passed extended VPD compactly.
+ * @prefix: string to print at start of printk
+ * @hostrcb: hostrcb pointer
+ * @vpd: vendor/product id/sn struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
+ struct ipr_vpd *vpd)
+{
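+ /* Room for the vendor ID, product ID, and serial number, two separator spaces, and a trailing NUL */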
+ char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
+ int i = 0;
+
+ memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
+ i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
+
+ memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
+ i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
+
+ memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
+ buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
+
+ ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
+}
+
+/**
* ipr_log_vpd - Log the passed VPD to the error log.
* @vpd: vendor/product id/sn struct
*
}
/**
+ * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
+ * @prefix: string to print at start of printk
+ * @hostrcb: hostrcb pointer
+ * @vpd: vendor/product id/sn/wwn struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
+ struct ipr_ext_vpd *vpd)
+{
+ ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
+ ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
+ be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
+}
+
+/**
* ipr_log_ext_vpd - Log the passed extended VPD to the error log.
* @vpd: vendor/product id/sn/wwn struct
*
error = &hostrcb->hcam.u.error.u.type_17_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+ strstrip(error->failure_reason);
- ipr_err("%s\n", error->failure_reason);
- ipr_err("Remote Adapter VPD:\n");
- ipr_log_ext_vpd(&error->vpd);
+ ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
+ be32_to_cpu(hostrcb->hcam.u.error.prc));
+ ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ipr_log_hex_data(ioa_cfg, error->data,
be32_to_cpu(hostrcb->hcam.length) -
(offsetof(struct ipr_hostrcb_error, u) +
error = &hostrcb->hcam.u.error.u.type_07_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+ strstrip(error->failure_reason);
- ipr_err("%s\n", error->failure_reason);
- ipr_err("Remote Adapter VPD:\n");
- ipr_log_vpd(&error->vpd);
+ ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
+ be32_to_cpu(hostrcb->hcam.u.error.prc));
+ ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ipr_log_hex_data(ioa_cfg, error->data,
be32_to_cpu(hostrcb->hcam.length) -
(offsetof(struct ipr_hostrcb_error, u) +
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
list_del(&hostrcb->queue);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
if (!ioasc) {
ipr_handle_log_data(ioa_cfg, hostrcb);
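+ /* If the logged error indicates the IOA requested a reset, start an abbreviated shutdown and reset */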
+ if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
dev_err(&ioa_cfg->pdev->dev,
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
sizeof(struct ipr_dump_entry_header);
driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
- strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
+ strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
driver_dump->hdr.num_entries++;
}
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
+ kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
LEAVE;
}
/**
* ipr_read_trace - Dump the adapter trace
* @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
* @buf: buffer
* @off: offset
* @count: buffer size
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
- loff_t off, size_t count)
+static ssize_t ipr_read_trace(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
- struct class_device *cdev = container_of(kobj,struct class_device,kobj);
- struct Scsi_Host *shost = class_to_shost(cdev);
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags = 0;
- int size = IPR_TRACE_SIZE;
- char *src = (char *)ioa_cfg->trace;
-
- if (off > size)
- return 0;
- if (off + count > size) {
- size -= off;
- count = size;
- }
+ ssize_t ret;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- memcpy(buf, &src[off], count);
+ ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
+ IPR_TRACE_SIZE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return count;
+
+ return ret;
}
static struct bin_attribute ipr_trace_attr = {
/**
* ipr_show_write_caching - Show the write caching attribute
- * @class_dev: class device struct
- * @buf: buffer
+ * @dev: device struct
+ * @buf: buffer
*
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
+static ssize_t ipr_show_write_caching(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags = 0;
int i, len = 0;
/**
* ipr_store_write_caching - Enable/disable adapter write cache
- * @class_dev: class_device struct
- * @buf: buffer
- * @count: buffer size
+ * @dev: device struct
+ * @buf: buffer
+ * @count: buffer size
*
* This function will enable/disable adapter write cache.
*
* Return value:
* count on success / other on failure
**/
-static ssize_t ipr_store_write_caching(struct class_device *class_dev,
- const char *buf, size_t count)
+static ssize_t ipr_store_write_caching(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags = 0;
enum ipr_cache_state new_state = CACHE_INVALID;
return count;
}
-static struct class_device_attribute ipr_ioa_cache_attr = {
+static struct device_attribute ipr_ioa_cache_attr = {
.attr = {
.name = "write_cache",
.mode = S_IRUGO | S_IWUSR,
/**
* ipr_show_fw_version - Show the firmware version
- * @class_dev: class device struct
- * @buf: buffer
+ * @dev: device struct
+ * @buf: buffer
*
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
+static ssize_t ipr_show_fw_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
unsigned long lock_flags = 0;
return len;
}
-static struct class_device_attribute ipr_fw_version_attr = {
+static struct device_attribute ipr_fw_version_attr = {
.attr = {
.name = "fw_version",
.mode = S_IRUGO,
/**
* ipr_show_log_level - Show the adapter's error logging level
- * @class_dev: class device struct
- * @buf: buffer
+ * @dev: device struct
+ * @buf: buffer
*
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
+static ssize_t ipr_show_log_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags = 0;
int len;
/**
* ipr_store_log_level - Change the adapter's error logging level
- * @class_dev: class device struct
- * @buf: buffer
+ * @dev: device struct
+ * @buf: buffer
*
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_store_log_level(struct class_device *class_dev,
+static ssize_t ipr_store_log_level(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
- struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags = 0;
return strlen(buf);
}
-static struct class_device_attribute ipr_log_level_attr = {
+static struct device_attribute ipr_log_level_attr = {
.attr = {
.name = "log_level",
.mode = S_IRUGO | S_IWUSR,
/**
* ipr_store_diagnostics - IOA Diagnostics interface
- * @class_dev: class_device struct
- * @buf: buffer
- * @count: buffer size
+ * @dev: device struct
+ * @buf: buffer
+ * @count: buffer size
*
* This function will reset the adapter and wait a reasonable
* amount of time for any errors that the adapter might log.
* Return value:
* count on success / other on failure
**/
-static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
+static ssize_t ipr_store_diagnostics(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
- struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags = 0;
int rc = count;
return rc;
}
-static struct class_device_attribute ipr_diagnostics_attr = {
+static struct device_attribute ipr_diagnostics_attr = {
.attr = {
.name = "run_diagnostics",
.mode = S_IWUSR,
/**
* ipr_show_adapter_state - Show the adapter's state
- * @class_dev: class device struct
- * @buf: buffer
+ * @dev: device struct
+ * @buf: buffer
*
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
+static ssize_t ipr_show_adapter_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags = 0;
int len;
/**
* ipr_store_adapter_state - Change adapter state
- * @class_dev: class_device struct
- * @buf: buffer
- * @count: buffer size
+ * @dev: device struct
+ * @buf: buffer
+ * @count: buffer size
*
* This function will change the adapter's state.
*
* Return value:
* count on success / other on failure
**/
-static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
+static ssize_t ipr_store_adapter_state(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
- struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags;
int result = count;
return result;
}
-static struct class_device_attribute ipr_ioa_state_attr = {
+static struct device_attribute ipr_ioa_state_attr = {
.attr = {
- .name = "state",
+ .name = "online_state",
.mode = S_IRUGO | S_IWUSR,
},
.show = ipr_show_adapter_state,
/**
* ipr_store_reset_adapter - Reset the adapter
- * @class_dev: class_device struct
- * @buf: buffer
- * @count: buffer size
+ * @dev: device struct
+ * @buf: buffer
+ * @count: buffer size
*
* This function will reset the adapter.
*
* Return value:
* count on success / other on failure
**/
-static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
+static ssize_t ipr_store_reset_adapter(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
- struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags;
int result = count;
return result;
}
-static struct class_device_attribute ipr_ioa_reset_attr = {
+static struct device_attribute ipr_ioa_reset_attr = {
.attr = {
.name = "reset_host",
.mode = S_IWUSR,
}
scatterlist = sglist->scatterlist;
+ sg_init_table(scatterlist, num_elem);
sglist->order = order;
sglist->num_sg = num_elem;
/* Free up what we already allocated */
for (j = i - 1; j >= 0; j--)
- __free_pages(scatterlist[j].page, order);
+ __free_pages(sg_page(&scatterlist[j]), order);
kfree(sglist);
return NULL;
}
- scatterlist[i].page = page;
+ sg_set_page(&scatterlist[i], page, 0, 0);
}
return sglist;
int i;
for (i = 0; i < sglist->num_sg; i++)
- __free_pages(sglist->scatterlist[i].page, sglist->order);
+ __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
kfree(sglist);
}
scatterlist = sglist->scatterlist;
for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
- kaddr = kmap(scatterlist[i].page);
+ struct page *page = sg_page(&scatterlist[i]);
+
+ kaddr = kmap(page);
memcpy(kaddr, buffer, bsize_elem);
- kunmap(scatterlist[i].page);
+ kunmap(page);
scatterlist[i].length = bsize_elem;
}
if (len % bsize_elem) {
- kaddr = kmap(scatterlist[i].page);
+ struct page *page = sg_page(&scatterlist[i]);
+
+ kaddr = kmap(page);
memcpy(kaddr, buffer, len % bsize_elem);
- kunmap(scatterlist[i].page);
+ kunmap(page);
scatterlist[i].length = len % bsize_elem;
}
/**
* ipr_store_update_fw - Update the firmware on the adapter
- * @class_dev: class_device struct
- * @buf: buffer
- * @count: buffer size
+ * @dev: device struct
+ * @buf: buffer
+ * @count: buffer size
*
* This function will update the firmware on the adapter.
*
* Return value:
* count on success / other on failure
**/
-static ssize_t ipr_store_update_fw(struct class_device *class_dev,
- const char *buf, size_t count)
+static ssize_t ipr_store_update_fw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
struct ipr_ucode_image_header *image_hdr;
const struct firmware *fw_entry;
return result;
}
-static struct class_device_attribute ipr_update_fw_attr = {
+static struct device_attribute ipr_update_fw_attr = {
.attr = {
.name = "update_fw",
.mode = S_IWUSR,
.store = ipr_store_update_fw
};
-static struct class_device_attribute *ipr_ioa_attrs[] = {
+static struct device_attribute *ipr_ioa_attrs[] = {
&ipr_fw_version_attr,
&ipr_log_level_attr,
&ipr_diagnostics_attr,
/**
* ipr_read_dump - Dump the adapter
* @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
* @buf: buffer
* @off: offset
* @count: buffer size
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
- loff_t off, size_t count)
+static ssize_t ipr_read_dump(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
- struct class_device *cdev = container_of(kobj,struct class_device,kobj);
+ struct device *cdev = container_of(kobj, struct device, kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
struct ipr_dump *dump;
/**
* ipr_write_dump - Setup dump state of adapter
* @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
* @buf: buffer
* @off: offset
* @count: buffer size
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
- loff_t off, size_t count)
+static ssize_t ipr_write_dump(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
- struct class_device *cdev = container_of(kobj,struct class_device,kobj);
+ struct device *cdev = container_of(kobj, struct device, kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
int rc;
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
+ struct ata_port *ap = NULL;
unsigned long lock_flags = 0;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
sdev->no_uld_attach = 1;
}
if (ipr_is_vset_device(res)) {
- sdev->timeout = IPR_VSET_RW_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue,
+ IPR_VSET_RW_TIMEOUT);
blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
}
if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
sdev->allow_restart = 1;
- if (ipr_is_gata(res) && res->sata_port) {
+ if (ipr_is_gata(res) && res->sata_port)
+ ap = res->sata_port->ap;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ if (ap) {
scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
- ata_sas_slave_configure(sdev, res->sata_port->ap);
- } else {
+ ata_sas_slave_configure(sdev, ap);
+ } else
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
- }
+ return 0;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return 0;
/**
* ipr_sata_reset - Reset the SATA port
- * @ap: SATA port to reset
+ * @link: SATA link to reset
* @classes: class of the attached device
*
- * This function issues a SATA phy reset to the affected ATA port.
+ * This function issues a SATA phy reset to the affected ATA link.
*
* Return value:
* 0 on success / non-zero on failure
**/
-static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
+static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
unsigned long deadline)
{
- struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_sata_port *sata_port = link->ap->private_data;
struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
struct ipr_resource_entry *res;
unsigned long lock_flags = 0;
if (ipr_is_gata(res) && res->sata_port) {
ap = res->sata_port->ap;
spin_unlock_irq(scsi_cmd->device->host->host_lock);
- ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
+ ata_std_error_handler(ap);
spin_lock_irq(scsi_cmd->device->host->host_lock);
+
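+ /* If a command for this device is still outstanding after error handling, report the reset as failed */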
+ list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
+ rc = -EIO;
+ break;
+ }
+ }
} else
rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0;
}
/**
+ * ipr_isr_eh - Interrupt service routine error handler
+ * @ioa_cfg: ioa config struct
+ * @msg: message to log
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
+{
+ ioa_cfg->errors_logged++;
+ dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
+
+ if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+ ioa_cfg->sdt_state = GET_DUMP;
+
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+}
+
+/**
* ipr_isr - Interrupt service routine
* @irq: irq number
* @devp: pointer to ioa config struct
volatile u32 int_reg, int_mask_reg;
u32 ioasc;
u16 cmd_index;
+ int num_hrrq = 0;
struct ipr_cmnd *ipr_cmd;
irqreturn_t rc = IRQ_NONE;
IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
- ioa_cfg->errors_logged++;
- dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
-
- if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
- ioa_cfg->sdt_state = GET_DUMP;
-
- ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return IRQ_HANDLED;
}
if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
- writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
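+ /* Retry the HRRQ interrupt clear up to IPR_MAX_HRRQ_RETRIES times; if it still will not clear, treat it as an adapter error */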
+ do {
+ writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
+ num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
+
+ if (int_reg & IPR_PCII_HRRQ_UPDATED) {
+ ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return IRQ_HANDLED;
+ }
+
} else
break;
}
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_cmnd *ipr_cmd)
{
- int i;
- struct scatterlist *sglist;
+ int i, nseg;
+ struct scatterlist *sg;
u32 length;
u32 ioadl_flags = 0;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
- length = scsi_cmd->request_bufflen;
-
- if (length == 0)
+ length = scsi_bufflen(scsi_cmd);
+ if (!length)
return 0;
- if (scsi_cmd->use_sg) {
- ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
- scsi_cmd->request_buffer,
- scsi_cmd->use_sg,
- scsi_cmd->sc_data_direction);
-
- if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_WRITE;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->write_data_transfer_length = cpu_to_be32(length);
- ioarcb->write_ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_READ;
- ioarcb->read_data_transfer_length = cpu_to_be32(length);
- ioarcb->read_ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- }
+ nseg = scsi_dma_map(scsi_cmd);
+ if (nseg < 0) {
+ dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+ return -1;
+ }
- sglist = scsi_cmd->request_buffer;
+ ipr_cmd->dma_use_sg = nseg;
- if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
- ioadl = ioarcb->add_data.u.ioadl;
- ioarcb->write_ioadl_addr =
- cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
- offsetof(struct ipr_ioarcb, add_data));
- ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
- }
-
- for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
- ioadl[i].flags_and_data_len =
- cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
- ioadl[i].address =
- cpu_to_be32(sg_dma_address(&sglist[i]));
- }
+ if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->write_data_transfer_length = cpu_to_be32(length);
+ ioarcb->write_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_READ;
+ ioarcb->read_data_transfer_length = cpu_to_be32(length);
+ ioarcb->read_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ }
- if (likely(ipr_cmd->dma_use_sg)) {
- ioadl[i-1].flags_and_data_len |=
- cpu_to_be32(IPR_IOADL_FLAGS_LAST);
- return 0;
- } else
- dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
- } else {
- if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_WRITE;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->write_data_transfer_length = cpu_to_be32(length);
- ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_READ;
- ioarcb->read_data_transfer_length = cpu_to_be32(length);
- ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- }
+ if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
+ ioadl = ioarcb->add_data.u.ioadl;
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
+ offsetof(struct ipr_ioarcb, add_data));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ }
- ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
- scsi_cmd->request_buffer, length,
- scsi_cmd->sc_data_direction);
-
- if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
- ioadl = ioarcb->add_data.u.ioadl;
- ioarcb->write_ioadl_addr =
- cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
- offsetof(struct ipr_ioarcb, add_data));
- ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
- ipr_cmd->dma_use_sg = 1;
- ioadl[0].flags_and_data_len =
- cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
- ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
- return 0;
- } else
- dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
+ scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+ ioadl[i].flags_and_data_len =
+ cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+ ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
}
- return -1;
+ ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+ return 0;
}
/**
res->needs_sync_complete = 1;
res->in_erp = 0;
}
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
if (!res) {
ipr_scsi_eh_done(ipr_cmd);
return;
}
- if (!ipr_is_gscsi(res))
+ if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
ipr_gen_sense(ipr_cmd);
ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
- switch (ioasc & IPR_IOASC_IOASC_MASK) {
+ switch (masked_ioasc) {
case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
if (ipr_is_naca_model(res))
scsi_cmd->result |= (DID_ABORT << 16);
break;
}
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
- scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
+ scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
} else
struct ipr_resource_entry *res;
res = (struct ipr_resource_entry *)sdev->hostdata;
- if (res && ipr_is_gata(res))
- return ata_scsi_ioctl(sdev, cmd, arg);
+ if (res && ipr_is_gata(res)) {
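+ /* HDIO_GET_IDENTITY is not supported for SATA devices behind this adapter */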
+ if (cmd == HDIO_GET_IDENTITY)
+ return -ENOTTY;
+ return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
+ }
return -EINVAL;
}
rc = ipr_device_reset(ioa_cfg, res);
if (rc) {
- ap->ops->port_disable(ap);
+ ata_port_disable(ap);
goto out_unlock;
}
switch(res->cfgte.proto) {
case IPR_PROTO_SATA:
case IPR_PROTO_SAS_STP:
- ap->device[0].class = ATA_DEV_ATA;
+ ap->link.device[0].class = ATA_DEV_ATA;
break;
case IPR_PROTO_SATA_ATAPI:
case IPR_PROTO_SAS_STP_ATAPI:
- ap->device[0].class = ATA_DEV_ATAPI;
+ ap->link.device[0].class = ATA_DEV_ATAPI;
break;
default:
- ap->device[0].class = ATA_DEV_UNKNOWN;
- ap->ops->port_disable(ap);
+ ap->link.device[0].class = ATA_DEV_UNKNOWN;
+ ata_port_disable(ap);
break;
};
}
/**
- * ipr_tf_read - Read the current ATA taskfile for the ATA port
- * @ap: ATA port
- * @tf: destination ATA taskfile
- *
- * Return value:
- * none
- **/
-static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
- struct ipr_sata_port *sata_port = ap->private_data;
- struct ipr_ioasa_gata *g = &sata_port->ioasa;
-
- tf->feature = g->error;
- tf->nsect = g->nsect;
- tf->lbal = g->lbal;
- tf->lbam = g->lbam;
- tf->lbah = g->lbah;
- tf->device = g->device;
- tf->command = g->status;
- tf->hob_nsect = g->hob_nsect;
- tf->hob_lbal = g->hob_lbal;
- tf->hob_lbam = g->hob_lbam;
- tf->hob_lbah = g->hob_lbah;
- tf->ctl = g->alt_status;
-}
-
-/**
* ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
* @regs: destination
* @tf: source ATA taskfile
u32 ioadl_flags = 0;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
- int len = qc->nbytes + qc->pad_len;
+ struct ipr_ioadl_desc *last_ioadl = NULL;
+ int len = qc->nbytes;
struct scatterlist *sg;
+ unsigned int si;
if (len == 0)
return;
cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
}
- ata_for_each_sg(sg, qc) {
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
ioadl->address = cpu_to_be32(sg_dma_address(sg));
- if (ata_sg_is_last(sg, qc))
- ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
- else
- ioadl++;
+
+ last_ioadl = ioadl;
+ ioadl++;
}
+
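+ /* Flag the final descriptor as the last element of the IOADL */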
+ if (likely(last_ioadl))
+ last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
- ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
+ ipr_cmd->dma_use_sg = qc->n_elem;
ipr_build_ata_ioadl(ipr_cmd, qc);
regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
break;
- case ATA_PROT_ATAPI:
- case ATA_PROT_ATAPI_NODATA:
+ case ATAPI_PROT_PIO:
+ case ATAPI_PROT_NODATA:
regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
break;
- case ATA_PROT_ATAPI_DMA:
+ case ATAPI_PROT_DMA:
regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
break;
}
/**
- * ipr_ata_check_status - Return last ATA status
- * @ap: ATA port
+ * ipr_qc_fill_rtf - Read result TF
+ * @qc: ATA queued command
*
* Return value:
- * ATA status
+ * true
**/
-static u8 ipr_ata_check_status(struct ata_port *ap)
+static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
- struct ipr_sata_port *sata_port = ap->private_data;
- return sata_port->ioasa.status;
-}
+ struct ipr_sata_port *sata_port = qc->ap->private_data;
+ struct ipr_ioasa_gata *g = &sata_port->ioasa;
+ struct ata_taskfile *tf = &qc->result_tf;
-/**
- * ipr_ata_check_altstatus - Return last ATA altstatus
- * @ap: ATA port
- *
- * Return value:
- * Alt ATA status
- **/
-static u8 ipr_ata_check_altstatus(struct ata_port *ap)
-{
- struct ipr_sata_port *sata_port = ap->private_data;
- return sata_port->ioasa.alt_status;
+ tf->feature = g->error;
+ tf->nsect = g->nsect;
+ tf->lbal = g->lbal;
+ tf->lbam = g->lbam;
+ tf->lbah = g->lbah;
+ tf->device = g->device;
+ tf->command = g->status;
+ tf->hob_nsect = g->hob_nsect;
+ tf->hob_lbal = g->hob_lbal;
+ tf->hob_lbam = g->hob_lbam;
+ tf->hob_lbah = g->hob_lbah;
+ tf->ctl = g->alt_status;
+
+ return true;
}
static struct ata_port_operations ipr_sata_ops = {
- .port_disable = ata_port_disable,
- .check_status = ipr_ata_check_status,
- .check_altstatus = ipr_ata_check_altstatus,
- .dev_select = ata_noop_dev_select,
.phy_reset = ipr_ata_phy_reset,
+ .hardreset = ipr_sata_reset,
.post_internal_cmd = ipr_ata_post_internal,
- .tf_read = ipr_tf_read,
.qc_prep = ata_noop_qc_prep,
.qc_issue = ipr_qc_issue,
+ .qc_fill_rtf = ipr_qc_fill_rtf,
.port_start = ata_sas_port_start,
.port_stop = ata_sas_port_stop
};
**/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
- u8 rev_id;
int i;
- if (ioa_cfg->type == 0x5702) {
- if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
- &rev_id) == PCIBIOS_SUCCESSFUL) {
- if (rev_id < 4) {
- for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
- if (__is_processor(ipr_blocked_processors[i]))
- return 1;
- }
- }
+ if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
+ for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
+ if (__is_processor(ipr_blocked_processors[i]))
+ return 1;
}
}
return 0;
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
- spin_unlock_irq(ioa_cfg->host->host_lock);
+ spin_unlock(ioa_cfg->host->host_lock);
scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ spin_lock(ioa_cfg->host->host_lock);
if (!ioa_cfg->allow_cmds)
scsi_block_requests(ioa_cfg->host);
}
/**
+ * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
+ * @ipr_cmd: ipr command struct
+ *
+ * This function enables dual IOA RAID support if possible.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
+ struct ipr_mode_page24 *mode_page;
+ int length;
+
+ ENTER;
+ mode_page = ipr_get_mode_page(mode_pages, 0x24,
+ sizeof(struct ipr_mode_page24));
+
+ if (mode_page)
+ mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
+
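+ /* The mode data length byte is not counted in hdr.length, so add one; the field itself must be zero for the Mode Select */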
+ length = mode_pages->hdr.length + 1;
+ mode_pages->hdr.length = 0;
+
+ ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
+ ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
+ length);
+
+ ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
+ * @ipr_cmd: ipr command struct
+ *
+ * This function handles the failure of a Mode Sense to the IOAFP.
+ * Some adapters do not handle all mode pages.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
+{
+ u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+ if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
+ ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
+ return IPR_RC_JOB_CONTINUE;
+ }
+
+ return ipr_reset_cmd_failed(ipr_cmd);
+}
+
+/**
+ * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a mode sense to the IOA to retrieve
+ * the IOA Advanced Function Control mode page.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
+ 0x24, ioa_cfg->vpd_cbs_dma +
+ offsetof(struct ipr_misc_cbs, mode_pages),
+ sizeof(struct ipr_mode_pages));
+
+ ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
+ ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
* ipr_init_res_table - Initialize the resource table
* @ipr_cmd: ipr command struct
*
}
}
- ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
+ if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
+ ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
+ else
+ ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
LEAVE;
return IPR_RC_JOB_CONTINUE;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
+ struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
ENTER;
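+ /* The capabilities data was returned by the page 0xD0 inquiry; note dual IOA RAID support if reported */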
+ if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
+ ioa_cfg->dual_raid = 1;
dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
ucode_vpd->major_release, ucode_vpd->card_type,
ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
}
/**
+ * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a Page 0xD0 inquiry to the adapter
+ * to retrieve adapter capabilities.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
+ struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
+ memset(cap, 0, sizeof(*cap));
+
+ if (ipr_inquiry_page_supported(page0, 0xD0)) {
+ ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
+ ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
+ sizeof(struct ipr_inquiry_cap));
+ return IPR_RC_JOB_RETURN;
+ }
+
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
* ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
* @ipr_cmd: ipr command struct
*
if (!ipr_inquiry_page_supported(page0, 1))
ioa_cfg->cache_state = CACHE_NONE;
- ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
+ ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
ipr_ioafp_inquiry(ipr_cmd, 1, 3,
ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
struct ipr_hostrcb *hostrcb;
struct ipr_uc_sdt sdt;
int rc, length;
+ u32 ioasc;
mailbox = readl(ioa_cfg->ioa_mailbox);
(__be32 *)&hostrcb->hcam,
min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
- if (!rc)
+ if (!rc) {
ipr_handle_log_data(ioa_cfg, hostrcb);
- else
+ ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+ if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
+ ioa_cfg->sdt_state == GET_DUMP)
+ ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+ } else
ipr_unit_check_no_data(ioa_cfg);
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
/**
+ * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This clears PCI reset to the adapter and delays two seconds.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
+{
+ ENTER;
+ pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
+ ipr_cmd->job_step = ipr_reset_bist_done;
+ ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This asserts PCI reset to the adapter.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct pci_dev *pdev = ioa_cfg->pdev;
+
+ ENTER;
+ pci_block_user_cfg_access(pdev);
+ pci_set_pcie_reset_state(pdev, pcie_warm_reset);
+ ipr_cmd->job_step = ipr_reset_slot_reset_done;
+ ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
* ipr_reset_allowed - Query whether or not IOA can be reset
* @ioa_cfg: ioa config struct
*
ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
} else {
- ipr_cmd->job_step = ipr_reset_start_bist;
+ ipr_cmd->job_step = ioa_cfg->reset;
rc = IPR_RC_JOB_CONTINUE;
}
writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
} else {
- ipr_cmd->job_step = ipr_reset_start_bist;
+ ipr_cmd->job_step = ioa_cfg->reset;
}
ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
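+ /* Select the shutdown timeout; dual IOA RAID configurations need a longer abbreviated shutdown */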
- if (shutdown_type == IPR_SHUTDOWN_ABBREV)
- timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
+ if (shutdown_type == IPR_SHUTDOWN_NORMAL)
+ timeout = IPR_SHUTDOWN_TIMEOUT;
else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
timeout = IPR_INTERNAL_TIMEOUT;
+ else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
+ timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
else
- timeout = IPR_SHUTDOWN_TIMEOUT;
+ timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
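+ /* Adapters needing a warm reset go through the full reset job; others simply restore PCI config space */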
- _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
- IPR_SHUTDOWN_NONE);
+ if (ioa_cfg->needs_warm_reset)
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ else
+ _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
+ IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
return PCI_ERS_RESULT_RECOVERED;
}
ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
ioa_cfg->in_ioa_bringdown = 1;
+ ioa_cfg->allow_cmds = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
* where it can accept new commands.
* Return value:
- * 0 on sucess / -EIO on failure
+ * 0 on success / -EIO on failure
**/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
ENTER;
free_irq(pdev->irq, ioa_cfg);
+ pci_disable_msi(pdev);
iounmap(ioa_cfg->hdw_dma_regs);
pci_release_regions(pdev);
ipr_free_mem(ioa_cfg);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
init_waitqueue_head(&ioa_cfg->reset_wait_q);
+ init_waitqueue_head(&ioa_cfg->msi_wait_q);
ioa_cfg->sdt_state = INACTIVE;
if (ipr_enable_cache)
ioa_cfg->cache_state = CACHE_ENABLED;
}
/**
- * ipr_get_chip_cfg - Find adapter chip configuration
+ * ipr_get_chip_info - Find adapter chip information
* @dev_id: PCI device id struct
*
* Return value:
- * ptr to chip config on success / NULL on failure
+ * ptr to chip information on success / NULL on failure
**/
-static const struct ipr_chip_cfg_t * __devinit
-ipr_get_chip_cfg(const struct pci_device_id *dev_id)
+static const struct ipr_chip_t * __devinit
+ipr_get_chip_info(const struct pci_device_id *dev_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
if (ipr_chip[i].vendor == dev_id->vendor &&
ipr_chip[i].device == dev_id->device)
- return ipr_chip[i].cfg;
+ return &ipr_chip[i];
return NULL;
}
/**
+ * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
+ * @irq: interrupt number
+ * @devp: pointer to ioa config struct
+ *
+ * Description: Simply set the msi_received flag to 1 indicating that
+ * Message Signaled Interrupts are supported.
+ *
+ * Return value:
+ * IRQ_HANDLED
+ **/
+static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
+{
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
+ unsigned long lock_flags = 0;
+ irqreturn_t rc = IRQ_HANDLED;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ ioa_cfg->msi_received = 1;
+ wake_up(&ioa_cfg->msi_wait_q);
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return rc;
+}
+
+/**
+ * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
+ * @ioa_cfg: ioa config struct
+ * @pdev: PCI device struct
+ *
+ * Description: The return value from pci_enable_msi() cannot always be
+ * trusted. This routine sets up and initiates a test interrupt to determine
+ * if the interrupt is received via the ipr_test_intr() service routine.
+ * If the test fails, the driver will fall back to LSI.
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
+ struct pci_dev *pdev)
+{
+ int rc;
+ volatile u32 int_reg;
+ unsigned long lock_flags = 0;
+
+ ENTER;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ init_waitqueue_head(&ioa_cfg->msi_wait_q);
+ ioa_cfg->msi_received = 0;
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ if (rc) {
+ dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
+ return rc;
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
+
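+ /* Generate a test interrupt and wait up to one second for ipr_test_intr() to see it */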
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+ wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (!ioa_cfg->msi_received) {
+ /* MSI test failed */
+ dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
+ rc = -EOPNOTSUPP;
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "MSI test succeeded.\n");
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ free_irq(pdev->irq, ioa_cfg);
+
+ LEAVE;
+
+ return rc;
+}
+
+/**
* ipr_probe_ioa - Allocates memory and does first stage of initialization
* @pdev: PCI device struct
* @dev_id: PCI device id struct
unsigned long ipr_regs_pci;
void __iomem *ipr_regs;
int rc = PCIBIOS_SUCCESSFUL;
- volatile u32 mask, uproc;
+ volatile u32 mask, uproc, interrupts;
ENTER;
ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
sata_port_info.flags, &ipr_sata_ops);
- ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
+ ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
- if (!ioa_cfg->chip_cfg) {
+ if (!ioa_cfg->ipr_chip) {
dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
dev_id->vendor, dev_id->device);
goto out_scsi_host_put;
}
+ ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
+
if (ipr_transop_timeout)
ioa_cfg->transop_timeout = ipr_transop_timeout;
else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
else
ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
+ ioa_cfg->revid = pdev->revision;
+
ipr_regs_pci = pci_resource_start(pdev, 0);
rc = pci_request_regions(pdev, IPR_NAME);
goto out_scsi_host_put;
}
- ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
+ ipr_regs = pci_ioremap_bar(pdev, 0);
if (!ipr_regs) {
dev_err(&pdev->dev,
pci_set_master(pdev);
- rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
goto cleanup_nomem;
goto cleanup_nomem;
}
+ /* Enable MSI style interrupts if they are supported. */
+ if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
+ rc = ipr_test_msi(ioa_cfg, pdev);
+ if (rc == -EOPNOTSUPP)
+ pci_disable_msi(pdev);
+ else if (rc)
+ goto out_msi_disable;
+ else
+ dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "Cannot enable MSI.\n");
+
/* Save away PCI config space for use following IOA reset */
rc = pci_save_state(pdev);
* the card is in an unknown state and needs a hard reset
*/
mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
ioa_cfg->needs_hard_reset = 1;
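+ /* A pending error interrupt also forces a hard reset; note any pending unit check for handling during bring-up */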
+ if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
+ ioa_cfg->needs_hard_reset = 1;
+ if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
+ ioa_cfg->ioa_unit_checked = 1;
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
+ rc = request_irq(pdev->irq, ipr_isr,
+ ioa_cfg->msi_received ? 0 : IRQF_SHARED,
+ IPR_NAME, ioa_cfg);
if (rc) {
dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
goto cleanup_nolog;
}
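+ /* Adapters flagged for PCI warm reset (and revision 0 Obsidian-E) reset via the PCI slot rather than BIST */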
+ if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
+ (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
+ ioa_cfg->needs_warm_reset = 1;
+ ioa_cfg->reset = ipr_reset_slot_reset;
+ } else
+ ioa_cfg->reset = ipr_reset_start_bist;
+
spin_lock(&ipr_driver_lock);
list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
spin_unlock(&ipr_driver_lock);
ipr_free_mem(ioa_cfg);
cleanup_nomem:
iounmap(ipr_regs);
+out_msi_disable:
+ pci_disable_msi(pdev);
out_release_regions:
pci_release_regions(pdev);
out_scsi_host_put:
* Return value:
* none
**/
-static void ipr_remove(struct pci_dev *pdev)
+static void __devexit ipr_remove(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
ENTER;
- ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
+ ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
&ipr_trace_attr);
- ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
+ ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
&ipr_dump_attr);
scsi_remove_host(ioa_cfg->host);
return rc;
}
- rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
+ rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
&ipr_trace_attr);
if (rc) {
return rc;
}
- rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
+ rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
&ipr_dump_attr);
if (rc) {
- ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
+ ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
&ipr_trace_attr);
scsi_remove_host(ioa_cfg->host);
__ipr_remove(pdev);
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
- PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
- PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT},
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
- PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 0 },
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
- IPR_USE_LONG_TRANSOP_TIMEOUT },
+ IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
.name = IPR_NAME,
.id_table = ipr_pci_table,
.probe = ipr_probe,
- .remove = ipr_remove,
+ .remove = __devexit_p(ipr_remove),
.shutdown = ipr_shutdown,
.err_handler = &ipr_err_handler,
- .dynids.use_driver_data = 1
};
/**