X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fscsi%2Fipr.c;h=c09d77591f92b8576432544243781ed4df11d39a;hb=dd26bf6d95f050c42cc8f15e750b09851e1fd30b;hp=39133748fb737f5ef536d5776b2847dfbe1a4f60;hpb=7dce0e1c84cfa8fb2a4b41877c20def386cade2b;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 3913374..c09d775 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -71,6 +71,7 @@ #include #include #include +#include #include #include #include @@ -84,15 +85,15 @@ /* * Global Data */ -static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head); +static LIST_HEAD(ipr_ioa_head); static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL; static unsigned int ipr_max_speed = 1; static int ipr_testmode = 0; static unsigned int ipr_fastfail = 0; -static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT; +static unsigned int ipr_transop_timeout = 0; static unsigned int ipr_enable_cache = 1; static unsigned int ipr_debug = 0; -static int ipr_auto_create = 1; +static unsigned int ipr_dual_ioa_raid = 1; static DEFINE_SPINLOCK(ipr_driver_lock); /* This table describes the differences between DMA controller chips */ @@ -151,23 +152,23 @@ module_param_named(log_level, ipr_log_level, uint, 0); MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver"); module_param_named(testmode, ipr_testmode, int, 0); MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations"); -module_param_named(fastfail, ipr_fastfail, int, 0); +module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries"); module_param_named(transop_timeout, ipr_transop_timeout, int, 0); MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)"); module_param_named(enable_cache, ipr_enable_cache, int, 0); MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)"); -module_param_named(debug, ipr_debug, int, 0); +module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); -module_param_named(auto_create, ipr_auto_create, int, 0); -MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)"); +module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0); +MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. 
(default: 1)"); MODULE_LICENSE("GPL"); MODULE_VERSION(IPR_DRIVER_VERSION); /* A constant array of IOASCs/URCs/Error Messages */ static const struct ipr_error_table_t ipr_error_table[] = { - {0x00000000, 1, 1, + {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL, "8155: An unknown error was received"}, {0x00330000, 0, 0, "Soft underlength error"}, @@ -175,125 +176,127 @@ struct ipr_error_table_t ipr_error_table[] = { "Command to be cancelled not found"}, {0x00808000, 0, 0, "Qualified success"}, - {0x01080000, 1, 1, + {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL, "FFFE: Soft device bus error recovered by the IOA"}, - {0x01088100, 0, 1, + {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, "4101: Soft device bus fabric error"}, - {0x01170600, 0, 1, + {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, "FFF9: Device sector reassign successful"}, - {0x01170900, 0, 1, + {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, "FFF7: Media error recovered by device rewrite procedures"}, - {0x01180200, 0, 1, + {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL, "7001: IOA sector reassignment successful"}, - {0x01180500, 0, 1, + {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL, "FFF9: Soft media error. Sector reassignment recommended"}, - {0x01180600, 0, 1, + {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL, "FFF7: Media error recovered by IOA rewrite procedures"}, - {0x01418000, 0, 1, + {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL, "FF3D: Soft PCI bus error recovered by the IOA"}, - {0x01440000, 1, 1, + {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device hardware error recovered by the IOA"}, - {0x01448100, 0, 1, + {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device hardware error recovered by the device"}, - {0x01448200, 1, 1, + {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL, "FF3D: Soft IOA error recovered by the IOA"}, - {0x01448300, 0, 1, + {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL, "FFFA: Undefined device response recovered by the IOA"}, - {0x014A0000, 1, 1, + {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device bus error, message or command phase"}, - {0x014A8000, 0, 1, + {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFE: Task Management Function failed"}, - {0x015D0000, 0, 1, + {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF6: Failure prediction threshold exceeded"}, - {0x015D9200, 0, 1, + {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL, "8009: Impending cache battery pack failure"}, {0x02040400, 0, 0, "34FF: Disk device format in progress"}, + {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL, + "9070: IOA requested reset"}, {0x023F0000, 0, 0, "Synchronization required"}, {0x024E0000, 0, 0, "No ready, IOA shutdown"}, {0x025A0000, 0, 0, "Not ready, IOA has been shutdown"}, - {0x02670100, 0, 1, + {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL, "3020: Storage subsystem configuration error"}, {0x03110B00, 0, 0, "FFF5: Medium error, data unreadable, recommend reassign"}, {0x03110C00, 0, 0, "7000: Medium error, data unreadable, do not reassign"}, - {0x03310000, 0, 1, + {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF3: Disk media format bad"}, - {0x04050000, 0, 1, + {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL, "3002: Addressed device failed to respond to selection"}, - {0x04080000, 1, 1, + {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL, "3100: Device bus error"}, - {0x04080100, 0, 1, + {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL, "3109: IOA timed out a device command"}, {0x04088000, 0, 0, "3120: SCSI bus is not operational"}, - {0x04088100, 0, 1, + {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL, "4100: Hard device bus fabric error"}, - {0x04118000, 0, 1, + {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL, "9000: IOA reserved area data check"}, - {0x04118100, 0, 1, + {0x04118100, 0, 
IPR_DEFAULT_LOG_LEVEL, "9001: IOA reserved area invalid data pattern"}, - {0x04118200, 0, 1, + {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL, "9002: IOA reserved area LRC error"}, - {0x04320000, 0, 1, + {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL, "102E: Out of alternate sectors for disk storage"}, - {0x04330000, 1, 1, + {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Data transfer underlength error"}, - {0x04338000, 1, 1, + {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Data transfer overlength error"}, - {0x043E0100, 0, 1, + {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL, "3400: Logical unit failure"}, - {0x04408500, 0, 1, + {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL, "FFF4: Device microcode is corrupt"}, - {0x04418000, 1, 1, + {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL, "8150: PCI bus error"}, {0x04430000, 1, 0, "Unsupported device bus message received"}, - {0x04440000, 1, 1, + {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Disk device problem"}, - {0x04448200, 1, 1, + {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL, "8150: Permanent IOA failure"}, - {0x04448300, 0, 1, + {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL, "3010: Disk device returned wrong response to IOA"}, - {0x04448400, 0, 1, + {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL, "8151: IOA microcode error"}, {0x04448500, 0, 0, "Device bus status error"}, - {0x04448600, 0, 1, + {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL, "8157: IOA error requiring IOA reset to recover"}, {0x04448700, 0, 0, "ATA device status error"}, {0x04490000, 0, 0, "Message reject received from the device"}, - {0x04449200, 0, 1, + {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL, "8008: A permanent cache battery pack failure occurred"}, - {0x0444A000, 0, 1, + {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL, "9090: Disk unit has been modified after the last known status"}, - {0x0444A200, 0, 1, + {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL, "9081: IOA detected device error"}, - {0x0444A300, 0, 1, + {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL, "9082: IOA detected device error"}, - {0x044A0000, 1, 1, + {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL, "3110: Device bus error, message or command phase"}, - {0x044A8000, 1, 1, + {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL, "3110: SAS Command / Task Management Function failed"}, - {0x04670400, 0, 1, + {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL, "9091: Incorrect hardware configuration change has been detected"}, - {0x04678000, 0, 1, + {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL, "9073: Invalid multi-adapter configuration"}, - {0x04678100, 0, 1, + {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL, "4010: Incorrect connection between cascaded expanders"}, - {0x04678200, 0, 1, + {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL, "4020: Connections exceed IOA design limits"}, - {0x04678300, 0, 1, + {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL, "4030: Incorrect multipath connection"}, - {0x04679000, 0, 1, + {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL, "4110: Unsupported enclosure function"}, - {0x046E0000, 0, 1, + {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF4: Command to logical unit failed"}, {0x05240000, 1, 0, "Illegal request, invalid request type or request packet"}, @@ -313,101 +316,105 @@ struct ipr_error_table_t ipr_error_table[] = { "Illegal request, command sequence error"}, {0x052C8000, 1, 0, "Illegal request, dual adapter support not enabled"}, - {0x06040500, 0, 1, + {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL, "9031: Array protection temporarily suspended, protection resuming"}, - {0x06040600, 0, 1, + {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL, "9040: Array protection temporarily suspended, protection resuming"}, - {0x06288000, 0, 1, + {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL, "3140: 
Device bus not ready to ready transition"}, - {0x06290000, 0, 1, + {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFB: SCSI bus was reset"}, {0x06290500, 0, 0, "FFFE: SCSI bus transition to single ended"}, {0x06290600, 0, 0, "FFFE: SCSI bus transition to LVD"}, - {0x06298000, 0, 1, + {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFB: SCSI bus was reset by another initiator"}, - {0x063F0300, 0, 1, + {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL, "3029: A device replacement has occurred"}, - {0x064C8000, 0, 1, + {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL, "9051: IOA cache data exists for a missing or failed device"}, - {0x064C8100, 0, 1, + {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL, "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"}, - {0x06670100, 0, 1, + {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL, "9025: Disk unit is not supported at its physical location"}, - {0x06670600, 0, 1, + {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL, "3020: IOA detected a SCSI bus configuration error"}, - {0x06678000, 0, 1, + {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL, "3150: SCSI bus configuration error"}, - {0x06678100, 0, 1, + {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL, "9074: Asymmetric advanced function disk configuration"}, - {0x06678300, 0, 1, + {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL, "4040: Incomplete multipath connection between IOA and enclosure"}, - {0x06678400, 0, 1, + {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL, "4041: Incomplete multipath connection between enclosure and device"}, - {0x06678500, 0, 1, + {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL, "9075: Incomplete multipath connection between IOA and remote IOA"}, - {0x06678600, 0, 1, + {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL, "9076: Configuration error, missing remote IOA"}, - {0x06679100, 0, 1, + {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL, "4050: Enclosure does not support a required multipath function"}, - {0x06690200, 0, 1, + {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL, + "4070: Logically bad block written on device"}, + {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL, "9041: Array protection temporarily suspended"}, - {0x06698200, 0, 1, + {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL, "9042: Corrupt array parity detected on specified device"}, - {0x066B0200, 0, 1, + {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL, "9030: Array no longer protected due to missing or failed disk unit"}, - {0x066B8000, 0, 1, + {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL, "9071: Link operational transition"}, - {0x066B8100, 0, 1, + {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL, "9072: Link not operational transition"}, - {0x066B8200, 0, 1, + {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL, "9032: Array exposed but still protected"}, - {0x066B9100, 0, 1, + {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1, + "70DD: Device forced failed by disrupt device command"}, + {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL, "4061: Multipath redundancy level got better"}, - {0x066B9200, 0, 1, + {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL, "4060: Multipath redundancy level got worse"}, {0x07270000, 0, 0, "Failure due to other device"}, - {0x07278000, 0, 1, + {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL, "9008: IOA does not support functions expected by devices"}, - {0x07278100, 0, 1, + {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL, "9010: Cache data associated with attached devices cannot be found"}, - {0x07278200, 0, 1, + {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL, "9011: Cache data belongs to devices other than those attached"}, - {0x07278400, 0, 1, + {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL, "9020: Array missing 2 or more devices with only 1 device present"}, - {0x07278500, 0, 1, + {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL, 
"9021: Array missing 2 or more devices with 2 or more devices present"}, - {0x07278600, 0, 1, + {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL, "9022: Exposed array is missing a required device"}, - {0x07278700, 0, 1, + {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL, "9023: Array member(s) not at required physical locations"}, - {0x07278800, 0, 1, + {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL, "9024: Array not functional due to present hardware configuration"}, - {0x07278900, 0, 1, + {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL, "9026: Array not functional due to present hardware configuration"}, - {0x07278A00, 0, 1, + {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL, "9027: Array is missing a device and parity is out of sync"}, - {0x07278B00, 0, 1, + {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL, "9028: Maximum number of arrays already exist"}, - {0x07278C00, 0, 1, + {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL, "9050: Required cache data cannot be located for a disk unit"}, - {0x07278D00, 0, 1, + {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL, "9052: Cache data exists for a device that has been modified"}, - {0x07278F00, 0, 1, + {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL, "9054: IOA resources not available due to previous problems"}, - {0x07279100, 0, 1, + {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL, "9092: Disk unit requires initialization before use"}, - {0x07279200, 0, 1, + {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL, "9029: Incorrect hardware configuration change has been detected"}, - {0x07279600, 0, 1, + {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL, "9060: One or more disk pairs are missing from an array"}, - {0x07279700, 0, 1, + {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL, "9061: One or more disks are missing from an array"}, - {0x07279800, 0, 1, + {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL, "9062: One or more disks are missing from an array"}, - {0x07279900, 0, 1, + {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL, "9063: Maximum number of functional arrays has been exceeded"}, {0x0B260000, 0, 0, "Aborted command, invalid descriptor"}, @@ -481,12 +488,16 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); ioarcb->write_data_transfer_length = 0; ioarcb->read_data_transfer_length = 0; ioarcb->write_ioadl_len = 0; ioarcb->read_ioadl_len = 0; + ioarcb->write_ioadl_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; ioasa->ioasc = 0; ioasa->residual_data_len = 0; ioasa->u.gata.status = 0; @@ -532,32 +543,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) } /** - * ipr_unmap_sglist - Unmap scatterlist if mapped - * @ioa_cfg: ioa config struct - * @ipr_cmd: ipr command struct - * - * Return value: - * nothing - **/ -static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg, - struct ipr_cmnd *ipr_cmd) -{ - struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; - - if (ipr_cmd->dma_use_sg) { - if (scsi_cmd->use_sg > 0) { - pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer, - scsi_cmd->use_sg, - scsi_cmd->sc_data_direction); - } else { - pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle, - scsi_cmd->request_bufflen, - scsi_cmd->sc_data_direction); - } - } -} - -/** * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts * @ioa_cfg: ioa config struct * @clr_ints: interrupts to clear @@ -669,7 +654,7 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) scsi_cmd->result |= 
(DID_ERROR << 16); - ipr_unmap_sglist(ioa_cfg, ipr_cmd); + scsi_dma_unmap(ipr_cmd->scsi_cmd); scsi_cmd->scsi_done(scsi_cmd); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); } @@ -948,6 +933,53 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) } /** + * strip_and_pad_whitespace - Strip and pad trailing whitespace. + * @i: index into buffer + * @buf: string to modify + * + * This function will strip all trailing whitespace, pad the end + * of the string with a single space, and NULL terminate the string. + * + * Return value: + * new length of string + **/ +static int strip_and_pad_whitespace(int i, char *buf) +{ + while (i && buf[i] == ' ') + i--; + buf[i+1] = ' '; + buf[i+2] = '\0'; + return i + 2; +} + +/** + * ipr_log_vpd_compact - Log the passed extended VPD compactly. + * @prefix: string to print at start of printk + * @hostrcb: hostrcb pointer + * @vpd: vendor/product id/sn struct + * + * Return value: + * none + **/ +static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb, + struct ipr_vpd *vpd) +{ + char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3]; + int i = 0; + + memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); + i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer); + + memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN); + i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer); + + memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN); + buffer[IPR_SERIAL_NUM_LEN + i] = '\0'; + + ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer); +} + +/** * ipr_log_vpd - Log the passed VPD to the error log. * @vpd: vendor/product id/sn struct * @@ -971,6 +1003,23 @@ static void ipr_log_vpd(struct ipr_vpd *vpd) } /** + * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly. + * @prefix: string to print at start of printk + * @hostrcb: hostrcb pointer + * @vpd: vendor/product id/sn/wwn struct + * + * Return value: + * none + **/ +static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb, + struct ipr_ext_vpd *vpd) +{ + ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd); + ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix, + be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1])); +} + +/** * ipr_log_ext_vpd - Log the passed extended VPD to the error log. 
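
The removal of ipr_unmap_sglist() above, together with the ipr_build_ioadl() rewrite later in this patch, moves the driver onto the generic SCSI data accessors. A minimal sketch of that accessor pattern in a hypothetical LLD; example_build_sgl and struct example_sgl_entry are illustrative names only, not part of ipr:

	/* Map the command's scatterlist with scsi_dma_map() and walk it with
	 * scsi_for_each_sg(), as the reworked ipr_build_ioadl() does. */
	static int example_build_sgl(struct scsi_cmnd *cmd,
				     struct example_sgl_entry *sgl)
	{
		struct scatterlist *sg;
		int i, nseg;

		if (!scsi_bufflen(cmd))
			return 0;		/* no data transfer */

		nseg = scsi_dma_map(cmd);	/* DMA-maps scsi_sglist(cmd) */
		if (nseg < 0)
			return -EIO;

		scsi_for_each_sg(cmd, sg, nseg, i) {
			sgl[i].addr = sg_dma_address(sg);	/* endian handling omitted */
			sgl[i].len  = sg_dma_len(sg);
		}
		return nseg;
	}

On the completion path the mapping is released with a single scsi_dma_unmap(cmd), which is exactly what replaces ipr_unmap_sglist() in ipr_scsi_eh_done(), ipr_erp_done(), ipr_erp_start() and ipr_scsi_done().
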
* @vpd: vendor/product id/sn/wwn struct * @@ -1284,10 +1333,11 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, error = &hostrcb->hcam.u.error.u.type_17_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + strstrip(error->failure_reason); - ipr_err("%s\n", error->failure_reason); - ipr_err("Remote Adapter VPD:\n"); - ipr_log_ext_vpd(&error->vpd); + ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, + be32_to_cpu(hostrcb->hcam.u.error.prc)); + ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); ipr_log_hex_data(ioa_cfg, error->data, be32_to_cpu(hostrcb->hcam.length) - (offsetof(struct ipr_hostrcb_error, u) + @@ -1309,10 +1359,11 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, error = &hostrcb->hcam.u.error.u.type_07_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + strstrip(error->failure_reason); - ipr_err("%s\n", error->failure_reason); - ipr_err("Remote Adapter VPD:\n"); - ipr_log_vpd(&error->vpd); + ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, + be32_to_cpu(hostrcb->hcam.u.error.prc)); + ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); ipr_log_hex_data(ioa_cfg, error->data, be32_to_cpu(hostrcb->hcam.length) - (offsetof(struct ipr_hostrcb_error, u) + @@ -1610,7 +1661,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, /* Set indication we have logged an error */ ioa_cfg->errors_logged++; - if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) + if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) return; if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); @@ -1669,12 +1720,15 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); list_del(&hostrcb->queue); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); if (!ioasc) { ipr_handle_log_data(ioa_cfg, hostrcb); + if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) { dev_err(&ioa_cfg->pdev->dev, "Host RCB failed with IOASC: 0x%08X\n", ioasc); @@ -2132,7 +2186,7 @@ static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, sizeof(struct ipr_dump_entry_header); driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; - strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id); + strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); driver_dump->hdr.num_entries++; } @@ -2380,7 +2434,7 @@ restart: } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE); + kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); LEAVE; } @@ -2388,6 +2442,7 @@ restart: /** * ipr_read_trace - Dump the adapter trace * @kobj: kobject struct + * @bin_attr: bin_attribute struct * @buf: buffer * @off: offset * @count: buffer size @@ -2395,27 +2450,22 @@ restart: * Return value: * number of bytes printed to buffer **/ -static ssize_t ipr_read_trace(struct kobject *kobj, char *buf, - loff_t off, size_t count) +static ssize_t ipr_read_trace(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) 
{ - struct class_device *cdev = container_of(kobj,struct class_device,kobj); - struct Scsi_Host *shost = class_to_shost(cdev); + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; - int size = IPR_TRACE_SIZE; - char *src = (char *)ioa_cfg->trace; - - if (off > size) - return 0; - if (off + count > size) { - size -= off; - count = size; - } + ssize_t ret; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - memcpy(buf, &src[off], count); + ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, + IPR_TRACE_SIZE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return count; + + return ret; } static struct bin_attribute ipr_trace_attr = { @@ -2439,15 +2489,16 @@ static const struct { /** * ipr_show_write_caching - Show the write caching attribute - * @class_dev: class device struct - * @buf: buffer + * @dev: device struct + * @buf: buffer * * Return value: * number of bytes printed to buffer **/ -static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf) +static ssize_t ipr_show_write_caching(struct device *dev, + struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; int i, len = 0; @@ -2466,19 +2517,20 @@ static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf) /** * ipr_store_write_caching - Enable/disable adapter write cache - * @class_dev: class_device struct - * @buf: buffer - * @count: buffer size + * @dev: device struct + * @buf: buffer + * @count: buffer size * * This function will enable/disable adapter write cache. 
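
The converted ipr_read_trace() above also drops its open-coded offset/length clamping in favor of memory_read_from_buffer(). A minimal sketch of that helper in a struct device based bin_attribute read routine; example_bin_read, struct example_priv and its log/log_size fields are illustrative assumptions:

	static ssize_t example_bin_read(struct kobject *kobj,
					struct bin_attribute *bin_attr,
					char *buf, loff_t off, size_t count)
	{
		struct device *dev = container_of(kobj, struct device, kobj);
		struct example_priv *p = dev_get_drvdata(dev);	/* hypothetical private data */

		/* Copies at most count bytes of p->log starting at off, advances off,
		 * and returns the number of bytes copied (0 once off reaches log_size). */
		return memory_read_from_buffer(buf, count, &off, p->log, p->log_size);
	}
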
* * Return value: * count on success / other on failure **/ -static ssize_t ipr_store_write_caching(struct class_device *class_dev, - const char *buf, size_t count) +static ssize_t ipr_store_write_caching(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; enum ipr_cache_state new_state = CACHE_INVALID; @@ -2516,7 +2568,7 @@ static ssize_t ipr_store_write_caching(struct class_device *class_dev, return count; } -static struct class_device_attribute ipr_ioa_cache_attr = { +static struct device_attribute ipr_ioa_cache_attr = { .attr = { .name = "write_cache", .mode = S_IRUGO | S_IWUSR, @@ -2527,15 +2579,16 @@ static struct class_device_attribute ipr_ioa_cache_attr = { /** * ipr_show_fw_version - Show the firmware version - * @class_dev: class device struct - * @buf: buffer + * @dev: class device struct + * @buf: buffer * * Return value: * number of bytes printed to buffer **/ -static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf) +static ssize_t ipr_show_fw_version(struct device *dev, + struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; unsigned long lock_flags = 0; @@ -2550,7 +2603,7 @@ static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf) return len; } -static struct class_device_attribute ipr_fw_version_attr = { +static struct device_attribute ipr_fw_version_attr = { .attr = { .name = "fw_version", .mode = S_IRUGO, @@ -2560,15 +2613,16 @@ static struct class_device_attribute ipr_fw_version_attr = { /** * ipr_show_log_level - Show the adapter's error logging level - * @class_dev: class device struct - * @buf: buffer + * @dev: class device struct + * @buf: buffer * * Return value: * number of bytes printed to buffer **/ -static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf) +static ssize_t ipr_show_log_level(struct device *dev, + struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; int len; @@ -2581,16 +2635,17 @@ static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf) /** * ipr_store_log_level - Change the adapter's error logging level - * @class_dev: class device struct - * @buf: buffer + * @dev: class device struct + * @buf: buffer * * Return value: * number of bytes printed to buffer **/ -static ssize_t ipr_store_log_level(struct class_device *class_dev, +static ssize_t ipr_store_log_level(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; @@ -2600,7 +2655,7 @@ static ssize_t ipr_store_log_level(struct class_device *class_dev, return strlen(buf); } -static struct class_device_attribute ipr_log_level_attr = { +static struct device_attribute ipr_log_level_attr = { .attr = { .name = "log_level", .mode = S_IRUGO 
| S_IWUSR, @@ -2611,9 +2666,9 @@ static struct class_device_attribute ipr_log_level_attr = { /** * ipr_store_diagnostics - IOA Diagnostics interface - * @class_dev: class_device struct - * @buf: buffer - * @count: buffer size + * @dev: device struct + * @buf: buffer + * @count: buffer size * * This function will reset the adapter and wait a reasonable * amount of time for any errors that the adapter might log. @@ -2621,10 +2676,11 @@ static struct class_device_attribute ipr_log_level_attr = { * Return value: * count on success / other on failure **/ -static ssize_t ipr_store_diagnostics(struct class_device *class_dev, +static ssize_t ipr_store_diagnostics(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; int rc = count; @@ -2632,8 +2688,13 @@ static ssize_t ipr_store_diagnostics(struct class_device *class_dev, if (!capable(CAP_SYS_ADMIN)) return -EACCES; - wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + while(ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } + ioa_cfg->errors_logged = 0; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); @@ -2656,7 +2717,7 @@ static ssize_t ipr_store_diagnostics(struct class_device *class_dev, return rc; } -static struct class_device_attribute ipr_diagnostics_attr = { +static struct device_attribute ipr_diagnostics_attr = { .attr = { .name = "run_diagnostics", .mode = S_IWUSR, @@ -2666,15 +2727,16 @@ static struct class_device_attribute ipr_diagnostics_attr = { /** * ipr_show_adapter_state - Show the adapter's state - * @class_dev: class device struct - * @buf: buffer + * @class_dev: device struct + * @buf: buffer * * Return value: * number of bytes printed to buffer **/ -static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf) +static ssize_t ipr_show_adapter_state(struct device *dev, + struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; int len; @@ -2690,19 +2752,20 @@ static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf) /** * ipr_store_adapter_state - Change adapter state - * @class_dev: class_device struct - * @buf: buffer - * @count: buffer size + * @dev: device struct + * @buf: buffer + * @count: buffer size * * This function will change the adapter's state. 
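
The sysfs attribute hunks in this region all make the same class_device_attribute to device_attribute conversion: the show/store prototypes gain a struct device_attribute argument and the host is recovered with class_to_shost(dev). A minimal sketch of the resulting shape; example_show and example_attr are illustrative names, not attributes added by this patch:

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct Scsi_Host *shost = class_to_shost(dev);

		return snprintf(buf, PAGE_SIZE, "%u\n", shost->host_no);
	}

	static struct device_attribute example_attr = {
		.attr = { .name = "example", .mode = S_IRUGO },
		.show = example_show,
	};
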
* * Return value: * count on success / other on failure **/ -static ssize_t ipr_store_adapter_state(struct class_device *class_dev, +static ssize_t ipr_store_adapter_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags; int result = count; @@ -2723,9 +2786,9 @@ static ssize_t ipr_store_adapter_state(struct class_device *class_dev, return result; } -static struct class_device_attribute ipr_ioa_state_attr = { +static struct device_attribute ipr_ioa_state_attr = { .attr = { - .name = "state", + .name = "online_state", .mode = S_IRUGO | S_IWUSR, }, .show = ipr_show_adapter_state, @@ -2734,19 +2797,20 @@ static struct class_device_attribute ipr_ioa_state_attr = { /** * ipr_store_reset_adapter - Reset the adapter - * @class_dev: class_device struct - * @buf: buffer - * @count: buffer size + * @dev: device struct + * @buf: buffer + * @count: buffer size * * This function will reset the adapter. * * Return value: * count on success / other on failure **/ -static ssize_t ipr_store_reset_adapter(struct class_device *class_dev, +static ssize_t ipr_store_reset_adapter(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags; int result = count; @@ -2763,7 +2827,7 @@ static ssize_t ipr_store_reset_adapter(struct class_device *class_dev, return result; } -static struct class_device_attribute ipr_ioa_reset_attr = { +static struct device_attribute ipr_ioa_reset_attr = { .attr = { .name = "reset_host", .mode = S_IWUSR, @@ -2814,6 +2878,7 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) } scatterlist = sglist->scatterlist; + sg_init_table(scatterlist, num_elem); sglist->order = order; sglist->num_sg = num_elem; @@ -2826,12 +2891,12 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) /* Free up what we already allocated */ for (j = i - 1; j >= 0; j--) - __free_pages(scatterlist[j].page, order); + __free_pages(sg_page(&scatterlist[j]), order); kfree(sglist); return NULL; } - scatterlist[i].page = page; + sg_set_page(&scatterlist[i], page, 0, 0); } return sglist; @@ -2852,7 +2917,7 @@ static void ipr_free_ucode_buffer(struct ipr_sglist *sglist) int i; for (i = 0; i < sglist->num_sg; i++) - __free_pages(sglist->scatterlist[i].page, sglist->order); + __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order); kfree(sglist); } @@ -2882,9 +2947,11 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, scatterlist = sglist->scatterlist; for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) { - kaddr = kmap(scatterlist[i].page); + struct page *page = sg_page(&scatterlist[i]); + + kaddr = kmap(page); memcpy(kaddr, buffer, bsize_elem); - kunmap(scatterlist[i].page); + kunmap(page); scatterlist[i].length = bsize_elem; @@ -2895,9 +2962,11 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, } if (len % bsize_elem) { - kaddr = kmap(scatterlist[i].page); + struct page *page = sg_page(&scatterlist[i]); + + kaddr = kmap(page); memcpy(kaddr, buffer, len % bsize_elem); - kunmap(scatterlist[i].page); + kunmap(page); scatterlist[i].length = len % bsize_elem; } @@ -2955,6 +3024,11 @@ static int 
ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, unsigned long lock_flags; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + while(ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } if (ioa_cfg->ucode_sglist) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); @@ -2986,19 +3060,20 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, /** * ipr_store_update_fw - Update the firmware on the adapter - * @class_dev: class_device struct - * @buf: buffer - * @count: buffer size + * @class_dev: device struct + * @buf: buffer + * @count: buffer size * * This function will update the firmware on the adapter. * * Return value: * count on success / other on failure **/ -static ssize_t ipr_store_update_fw(struct class_device *class_dev, - const char *buf, size_t count) +static ssize_t ipr_store_update_fw(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; struct ipr_ucode_image_header *image_hdr; const struct firmware *fw_entry; @@ -3056,7 +3131,7 @@ out: return result; } -static struct class_device_attribute ipr_update_fw_attr = { +static struct device_attribute ipr_update_fw_attr = { .attr = { .name = "update_fw", .mode = S_IWUSR, @@ -3064,7 +3139,7 @@ static struct class_device_attribute ipr_update_fw_attr = { .store = ipr_store_update_fw }; -static struct class_device_attribute *ipr_ioa_attrs[] = { +static struct device_attribute *ipr_ioa_attrs[] = { &ipr_fw_version_attr, &ipr_log_level_attr, &ipr_diagnostics_attr, @@ -3079,6 +3154,7 @@ static struct class_device_attribute *ipr_ioa_attrs[] = { /** * ipr_read_dump - Dump the adapter * @kobj: kobject struct + * @bin_attr: bin_attribute struct * @buf: buffer * @off: offset * @count: buffer size @@ -3086,10 +3162,11 @@ static struct class_device_attribute *ipr_ioa_attrs[] = { * Return value: * number of bytes printed to buffer **/ -static ssize_t ipr_read_dump(struct kobject *kobj, char *buf, - loff_t off, size_t count) +static ssize_t ipr_read_dump(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) { - struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct device *cdev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = class_to_shost(cdev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; struct ipr_dump *dump; @@ -3240,6 +3317,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) /** * ipr_write_dump - Setup dump state of adapter * @kobj: kobject struct + * @bin_attr: bin_attribute struct * @buf: buffer * @off: offset * @count: buffer size @@ -3247,10 +3325,11 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) * Return value: * number of bytes printed to buffer **/ -static ssize_t ipr_write_dump(struct kobject *kobj, char *buf, - loff_t off, size_t count) +static ssize_t ipr_write_dump(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) { - struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct device *cdev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = class_to_shost(cdev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg 
*)shost->hostdata; int rc; @@ -3587,7 +3666,8 @@ static int ipr_slave_configure(struct scsi_device *sdev) sdev->no_uld_attach = 1; } if (ipr_is_vset_device(res)) { - sdev->timeout = IPR_VSET_RW_TIMEOUT; + blk_queue_rq_timeout(sdev->request_queue, + IPR_VSET_RW_TIMEOUT); blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); } if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) @@ -3762,17 +3842,18 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, /** * ipr_sata_reset - Reset the SATA port - * @ap: SATA port to reset + * @link: SATA link to reset * @classes: class of the attached device * - * This function issues a SATA phy reset to the affected ATA port. + * This function issues a SATA phy reset to the affected ATA link. * * Return value: * 0 on success / non-zero on failure **/ -static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes) +static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) { - struct ipr_sata_port *sata_port = ap->private_data; + struct ipr_sata_port *sata_port = link->ap->private_data; struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; struct ipr_resource_entry *res; unsigned long lock_flags = 0; @@ -3849,6 +3930,8 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { if (ipr_cmd->scsi_cmd) ipr_cmd->done = ipr_scsi_eh_done; + if (ipr_cmd->qc) + ipr_cmd->done = ipr_sata_eh_done; if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; @@ -3862,8 +3945,15 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) if (ipr_is_gata(res) && res->sata_port) { ap = res->sata_port->ap; spin_unlock_irq(scsi_cmd->device->host->host_lock); - ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL); + ata_std_error_handler(ap); spin_lock_irq(scsi_cmd->device->host->host_lock); + + list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { + if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { + rc = -EIO; + break; + } + } } else rc = ipr_device_reset(ioa_cfg, res); res->resetting_device = 0; @@ -4195,80 +4285,55 @@ static irqreturn_t ipr_isr(int irq, void *devp) static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, struct ipr_cmnd *ipr_cmd) { - int i; - struct scatterlist *sglist; + int i, nseg; + struct scatterlist *sg; u32 length; u32 ioadl_flags = 0; struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; - length = scsi_cmd->request_bufflen; - - if (length == 0) + length = scsi_bufflen(scsi_cmd); + if (!length) return 0; - if (scsi_cmd->use_sg) { - ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, - scsi_cmd->request_buffer, - scsi_cmd->use_sg, - scsi_cmd->sc_data_direction); - - if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { - ioadl_flags = IPR_IOADL_FLAGS_WRITE; - ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; - ioarcb->write_data_transfer_length = cpu_to_be32(length); - ioarcb->write_ioadl_len = - cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); - } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { - ioadl_flags = IPR_IOADL_FLAGS_READ; - ioarcb->read_data_transfer_length = cpu_to_be32(length); - ioarcb->read_ioadl_len = - cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); - } - - sglist = scsi_cmd->request_buffer; + nseg = scsi_dma_map(scsi_cmd); + if (nseg < 0) { + dev_err(&ioa_cfg->pdev->dev, "pci_map_sg 
failed!\n"); + return -1; + } - for (i = 0; i < ipr_cmd->dma_use_sg; i++) { - ioadl[i].flags_and_data_len = - cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i])); - ioadl[i].address = - cpu_to_be32(sg_dma_address(&sglist[i])); - } + ipr_cmd->dma_use_sg = nseg; - if (likely(ipr_cmd->dma_use_sg)) { - ioadl[i-1].flags_and_data_len |= - cpu_to_be32(IPR_IOADL_FLAGS_LAST); - return 0; - } else - dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); - } else { - if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { - ioadl_flags = IPR_IOADL_FLAGS_WRITE; - ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; - ioarcb->write_data_transfer_length = cpu_to_be32(length); - ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); - } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { - ioadl_flags = IPR_IOADL_FLAGS_READ; - ioarcb->read_data_transfer_length = cpu_to_be32(length); - ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); - } + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_WRITE; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->write_data_transfer_length = cpu_to_be32(length); + ioarcb->write_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_READ; + ioarcb->read_data_transfer_length = cpu_to_be32(length); + ioarcb->read_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + } - ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev, - scsi_cmd->request_buffer, length, - scsi_cmd->sc_data_direction); + if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) { + ioadl = ioarcb->add_data.u.ioadl; + ioarcb->write_ioadl_addr = + cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) + + offsetof(struct ipr_ioarcb, add_data)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + } - if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) { - ipr_cmd->dma_use_sg = 1; - ioadl[0].flags_and_data_len = - cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST); - ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle); - return 0; - } else - dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n"); + scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { + ioadl[i].flags_and_data_len = + cpu_to_be32(ioadl_flags | sg_dma_len(sg)); + ioadl[i].address = cpu_to_be32(sg_dma_address(sg)); } - return -1; + ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); + return 0; } /** @@ -4331,7 +4396,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) res->needs_sync_complete = 1; res->in_erp = 0; } - ipr_unmap_sglist(ioa_cfg, ipr_cmd); + scsi_dma_unmap(ipr_cmd->scsi_cmd); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); scsi_cmd->scsi_done(scsi_cmd); } @@ -4345,11 +4410,9 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) **/ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) { - struct ipr_ioarcb *ioarcb; - struct ipr_ioasa *ioasa; - - ioarcb = &ipr_cmd->ioarcb; - ioasa = &ipr_cmd->ioasa; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); ioarcb->write_data_transfer_length = 0; @@ -4358,6 +4421,9 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) ioarcb->read_ioadl_len = 0; ioasa->ioasc = 0; ioasa->residual_data_len = 0; + ioarcb->write_ioadl_addr = 
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; } /** @@ -4456,12 +4522,13 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, { int i; u16 data_len; - u32 ioasc; + u32 ioasc, fd_ioasc; struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; __be32 *ioasa_data = (__be32 *)ioasa; int error_index; ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; + fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK; if (0 == ioasc) return; @@ -4469,13 +4536,19 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) return; - error_index = ipr_get_error(ioasc); + if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc) + error_index = ipr_get_error(fd_ioasc); + else + error_index = ipr_get_error(ioasc); if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { /* Don't log an error if the IOA already logged one */ if (ioasa->ilid != 0) return; + if (!ipr_is_gscsi(res)) + return; + if (ipr_error_table[error_index].log_ioasa == 0) return; } @@ -4629,18 +4702,19 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_resource_entry *res = scsi_cmd->device->hostdata; u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; if (!res) { ipr_scsi_eh_done(ipr_cmd); return; } - if (ipr_is_gscsi(res)) - ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); - else + if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS) ipr_gen_sense(ipr_cmd); - switch (ioasc & IPR_IOASC_IOASC_MASK) { + ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); + + switch (masked_ioasc) { case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: if (ipr_is_naca_model(res)) scsi_cmd->result |= (DID_ABORT << 16); @@ -4700,7 +4774,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, break; } - ipr_unmap_sglist(ioa_cfg, ipr_cmd); + scsi_dma_unmap(ipr_cmd->scsi_cmd); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); scsi_cmd->scsi_done(scsi_cmd); } @@ -4721,10 +4795,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); - scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len); + scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len)); if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { - ipr_unmap_sglist(ioa_cfg, ipr_cmd); + scsi_dma_unmap(ipr_cmd->scsi_cmd); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); scsi_cmd->scsi_done(scsi_cmd); } else @@ -4837,8 +4911,11 @@ static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) struct ipr_resource_entry *res; res = (struct ipr_resource_entry *)sdev->hostdata; - if (res && ipr_is_gata(res)) - return ata_scsi_ioctl(sdev, cmd, arg); + if (res && ipr_is_gata(res)) { + if (cmd == HDIO_GET_IDENTITY) + return -ENOTTY; + return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg); + } return -EINVAL; } @@ -4920,22 +4997,22 @@ static void ipr_ata_phy_reset(struct ata_port *ap) rc = ipr_device_reset(ioa_cfg, res); if (rc) { - ap->ops->port_disable(ap); + ata_port_disable(ap); goto out_unlock; } switch(res->cfgte.proto) { case IPR_PROTO_SATA: case IPR_PROTO_SAS_STP: - ap->device[0].class = ATA_DEV_ATA; + ap->link.device[0].class = ATA_DEV_ATA; break; case IPR_PROTO_SATA_ATAPI: case IPR_PROTO_SAS_STP_ATAPI: - ap->device[0].class = ATA_DEV_ATAPI; + ap->link.device[0].class = ATA_DEV_ATAPI; break; default: - ap->device[0].class = ATA_DEV_UNKNOWN; - ap->ops->port_disable(ap); + 
ap->link.device[0].class = ATA_DEV_UNKNOWN; + ata_port_disable(ap); break; }; @@ -4975,33 +5052,6 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc) } /** - * ipr_tf_read - Read the current ATA taskfile for the ATA port - * @ap: ATA port - * @tf: destination ATA taskfile - * - * Return value: - * none - **/ -static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf) -{ - struct ipr_sata_port *sata_port = ap->private_data; - struct ipr_ioasa_gata *g = &sata_port->ioasa; - - tf->feature = g->error; - tf->nsect = g->nsect; - tf->lbal = g->lbal; - tf->lbam = g->lbam; - tf->lbah = g->lbah; - tf->device = g->device; - tf->command = g->status; - tf->hob_nsect = g->hob_nsect; - tf->hob_lbal = g->hob_lbal; - tf->hob_lbam = g->hob_lbam; - tf->hob_lbah = g->hob_lbah; - tf->ctl = g->alt_status; -} - -/** * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure * @regs: destination * @tf: source ATA taskfile @@ -5073,8 +5123,10 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, u32 ioadl_flags = 0; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; - int len = qc->nbytes + qc->pad_len; + struct ipr_ioadl_desc *last_ioadl = NULL; + int len = qc->nbytes; struct scatterlist *sg; + unsigned int si; if (len == 0) return; @@ -5092,14 +5144,16 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); } - ata_for_each_sg(sg, qc) { + for_each_sg(qc->sg, sg, qc->n_elem, si) { ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg)); ioadl->address = cpu_to_be32(sg_dma_address(sg)); - if (ata_sg_is_last(sg, qc)) - ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); - else - ioadl++; + + last_ioadl = ioadl; + ioadl++; } + + if (likely(last_ioadl)) + last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); } /** @@ -5120,7 +5174,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) struct ipr_ioarcb_ata_regs *regs; if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead)) - return -EIO; + return AC_ERR_SYSTEM; ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ioarcb = &ipr_cmd->ioarcb; @@ -5136,7 +5190,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; - ipr_cmd->dma_use_sg = qc->pad_len ? 
qc->n_elem + 1 : qc->n_elem; + ipr_cmd->dma_use_sg = qc->n_elem; ipr_build_ata_ioadl(ipr_cmd, qc); regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; @@ -5153,19 +5207,19 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; break; - case ATA_PROT_ATAPI: - case ATA_PROT_ATAPI_NODATA: + case ATAPI_PROT_PIO: + case ATAPI_PROT_NODATA: regs->flags |= IPR_ATA_FLAG_PACKET_CMD; break; - case ATA_PROT_ATAPI_DMA: + case ATAPI_PROT_DMA: regs->flags |= IPR_ATA_FLAG_PACKET_CMD; regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; break; default: WARN_ON(1); - return -1; + return AC_ERR_INVALID; } mb(); @@ -5175,41 +5229,41 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) } /** - * ipr_ata_check_status - Return last ATA status - * @ap: ATA port + * ipr_qc_fill_rtf - Read result TF + * @qc: ATA queued command * * Return value: - * ATA status + * true **/ -static u8 ipr_ata_check_status(struct ata_port *ap) +static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc) { - struct ipr_sata_port *sata_port = ap->private_data; - return sata_port->ioasa.status; -} + struct ipr_sata_port *sata_port = qc->ap->private_data; + struct ipr_ioasa_gata *g = &sata_port->ioasa; + struct ata_taskfile *tf = &qc->result_tf; -/** - * ipr_ata_check_altstatus - Return last ATA altstatus - * @ap: ATA port - * - * Return value: - * Alt ATA status - **/ -static u8 ipr_ata_check_altstatus(struct ata_port *ap) -{ - struct ipr_sata_port *sata_port = ap->private_data; - return sata_port->ioasa.alt_status; + tf->feature = g->error; + tf->nsect = g->nsect; + tf->lbal = g->lbal; + tf->lbam = g->lbam; + tf->lbah = g->lbah; + tf->device = g->device; + tf->command = g->status; + tf->hob_nsect = g->hob_nsect; + tf->hob_lbal = g->hob_lbal; + tf->hob_lbam = g->hob_lbam; + tf->hob_lbah = g->hob_lbah; + tf->ctl = g->alt_status; + + return true; } static struct ata_port_operations ipr_sata_ops = { - .port_disable = ata_port_disable, - .check_status = ipr_ata_check_status, - .check_altstatus = ipr_ata_check_altstatus, - .dev_select = ata_noop_dev_select, .phy_reset = ipr_ata_phy_reset, + .hardreset = ipr_sata_reset, .post_internal_cmd = ipr_ata_post_internal, - .tf_read = ipr_tf_read, .qc_prep = ata_noop_qc_prep, .qc_issue = ipr_qc_issue, + .qc_fill_rtf = ipr_qc_fill_rtf, .port_start = ata_sas_port_start, .port_stop = ata_sas_port_stop }; @@ -5248,18 +5302,12 @@ static const u16 ipr_blocked_processors[] = { **/ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) { - u8 rev_id; int i; - if (ioa_cfg->type == 0x5702) { - if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID, - &rev_id) == PCIBIOS_SUCCESSFUL) { - if (rev_id < 4) { - for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){ - if (__is_processor(ipr_blocked_processors[i])) - return 1; - } - } + if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { + for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){ + if (__is_processor(ipr_blocked_processors[i])) + return 1; } } return 0; @@ -5336,15 +5384,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); } + scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); ioa_cfg->reset_retries = 0; list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); wake_up_all(&ioa_cfg->reset_wait_q); - spin_unlock_irq(ioa_cfg->host->host_lock); + spin_unlock(ioa_cfg->host->host_lock); scsi_unblock_requests(ioa_cfg->host); - spin_lock_irq(ioa_cfg->host->host_lock); 
+ spin_lock(ioa_cfg->host->host_lock); if (!ioa_cfg->allow_cmds) scsi_block_requests(ioa_cfg->host); @@ -5772,6 +5821,94 @@ static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) } /** + * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA + * @ipr_cmd: ipr command struct + * + * This function enables dual IOA RAID support if possible. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; + struct ipr_mode_page24 *mode_page; + int length; + + ENTER; + mode_page = ipr_get_mode_page(mode_pages, 0x24, + sizeof(struct ipr_mode_page24)); + + if (mode_page) + mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; + + length = mode_pages->hdr.length + 1; + mode_pages->hdr.length = 0; + + ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), + length); + + ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense + * @ipr_cmd: ipr command struct + * + * This function handles the failure of a Mode Sense to the IOAFP. + * Some adapters do not handle all mode pages. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) +{ + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { + ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; + return IPR_RC_JOB_CONTINUE; + } + + return ipr_reset_cmd_failed(ipr_cmd); +} + +/** + * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA + * @ipr_cmd: ipr command struct + * + * This function send a mode sense to the IOA to retrieve + * the IOA Advanced Function Control mode page. 
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), + 0x24, ioa_cfg->vpd_cbs_dma + + offsetof(struct ipr_misc_cbs, mode_pages), + sizeof(struct ipr_mode_pages)); + + ipr_cmd->job_step = ipr_ioafp_mode_select_page24; + ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** * ipr_init_res_table - Initialize the resource table * @ipr_cmd: ipr command struct * @@ -5839,7 +5976,10 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) } } - ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; + if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) + ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; + else + ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; LEAVE; return IPR_RC_JOB_CONTINUE; @@ -5861,8 +6001,11 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; + struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; ENTER; + if (cap->cap & IPR_CAP_DUAL_IOA_RAID) + ioa_cfg->dual_raid = 1; dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", ucode_vpd->major_release, ucode_vpd->card_type, ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); @@ -5946,6 +6089,37 @@ static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page) } /** + * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Page 0xD0 inquiry to the adapter + * to retrieve adapter capabilities. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; + struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; + + ENTER; + ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; + memset(cap, 0, sizeof(*cap)); + + if (ipr_inquiry_page_supported(page0, 0xD0)) { + ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), + sizeof(struct ipr_inquiry_cap)); + return IPR_RC_JOB_RETURN; + } + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. 
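
ipr_ioafp_mode_sense_page24() and ipr_ioafp_cap_inquiry() above are links in the adapter bring-up job chain: each step points job_step at its successor (and optionally job_step_failed at an error handler), issues an adapter command, and returns IPR_RC_JOB_RETURN so the chain resumes from that command's completion. A minimal sketch of one such step; example_step, example_next_step and example_step_failed are illustrative names:

	static int example_step(struct ipr_cmnd *ipr_cmd)
	{
		/* Arrange where the state machine goes next... */
		ipr_cmd->job_step = example_next_step;
		ipr_cmd->job_step_failed = example_step_failed;

		/* ...then issue the adapter command asynchronously; ipr_reset_ioa_job
		 * re-enters the chain when the command completes. */
		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
		return IPR_RC_JOB_RETURN;
	}

A step that needs no adapter round trip instead sets job_step and returns IPR_RC_JOB_CONTINUE, as ipr_reset_mode_sense_page24_failed() does when mode page 0x24 is simply not supported.
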
  * @ipr_cmd:	ipr command struct
  *
@@ -5965,7 +6139,7 @@ static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
 	if (!ipr_inquiry_page_supported(page0, 1))
 		ioa_cfg->cache_state = CACHE_NONE;
 
-	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
+	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
 
 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
@@ -6187,7 +6361,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
 
 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
-	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
+	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
 	ipr_cmd->done = ipr_reset_ioa_job;
 	add_timer(&ipr_cmd->timer);
@@ -6251,6 +6425,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
 	struct ipr_hostrcb *hostrcb;
 	struct ipr_uc_sdt sdt;
 	int rc, length;
+	u32 ioasc;
 
 	mailbox = readl(ioa_cfg->ioa_mailbox);
 
@@ -6283,9 +6458,13 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
 				(__be32 *)&hostrcb->hcam,
 				min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
 
-	if (!rc)
+	if (!rc) {
 		ipr_handle_log_data(ioa_cfg, hostrcb);
-	else
+		ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
+		    ioa_cfg->sdt_state == GET_DUMP)
+			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+	} else
 		ipr_unit_check_no_data(ioa_cfg);
 
 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
@@ -6308,7 +6487,6 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 	int rc;
 
 	ENTER;
-	pci_unblock_user_cfg_access(ioa_cfg->pdev);
 	rc = pci_restore_state(ioa_cfg->pdev);
 
 	if (rc != PCIBIOS_SUCCESSFUL) {
@@ -6349,6 +6527,24 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
+ * ipr_reset_bist_done - BIST has completed on the adapter.
+ * @ipr_cmd:	ipr command struct
+ *
+ * Description: Unblock config space and resume the reset process.
+ *
+ * Return value:
+ * 	IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
+{
+	ENTER;
+	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
+	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
+	LEAVE;
+	return IPR_RC_JOB_CONTINUE;
+}
+
+/**
  * ipr_reset_start_bist - Run BIST on the adapter.
  * @ipr_cmd:	ipr command struct
  *
@@ -6367,10 +6563,11 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
 	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
 
 	if (rc != PCIBIOS_SUCCESSFUL) {
+		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
 		rc = IPR_RC_JOB_CONTINUE;
 	} else {
-		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
+		ipr_cmd->job_step = ipr_reset_bist_done;
 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
 		rc = IPR_RC_JOB_RETURN;
 	}
@@ -6380,6 +6577,48 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
+ * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
+ * @ipr_cmd:	ipr command struct
+ *
+ * Description: This clears PCI reset to the adapter and delays two seconds.
+ *
+ * Return value:
+ * 	IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
+{
+	ENTER;
+	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
+	ipr_cmd->job_step = ipr_reset_bist_done;
+	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
+	LEAVE;
+	return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
+ * @ipr_cmd:	ipr command struct
+ *
+ * Description: This asserts PCI reset to the adapter.
+ *
+ * Return value:
+ * 	IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	struct pci_dev *pdev = ioa_cfg->pdev;
+
+	ENTER;
+	pci_block_user_cfg_access(pdev);
+	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
+	ipr_cmd->job_step = ipr_reset_slot_reset_done;
+	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
+	LEAVE;
+	return IPR_RC_JOB_RETURN;
+}
+
+/**
  * ipr_reset_allowed - Query whether or not IOA can be reset
  * @ioa_cfg:	ioa config struct
  *
@@ -6418,7 +6657,7 @@ static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
 	} else {
-		ipr_cmd->job_step = ipr_reset_start_bist;
+		ipr_cmd->job_step = ioa_cfg->reset;
 		rc = IPR_RC_JOB_CONTINUE;
 	}
 
@@ -6451,7 +6690,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
 	} else {
-		ipr_cmd->job_step = ipr_reset_start_bist;
+		ipr_cmd->job_step = ioa_cfg->reset;
 	}
 
 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
@@ -6546,12 +6785,14 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
 
-		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
-			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
+		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
+			timeout = IPR_SHUTDOWN_TIMEOUT;
 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
 			timeout = IPR_INTERNAL_TIMEOUT;
+		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
+			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
 		else
-			timeout = IPR_SHUTDOWN_TIMEOUT;
+			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
 
 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
 
@@ -6731,8 +6972,11 @@ static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
-				IPR_SHUTDOWN_NONE);
+	if (ioa_cfg->needs_warm_reset)
+		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+	else
+		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
+					IPR_SHUTDOWN_NONE);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 	return PCI_ERS_RESULT_RECOVERED;
 }
@@ -6794,7 +7038,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
  * where it can accept new commands.
  *
  * Return value:
- * 	0 on sucess / -EIO on failure
+ * 	0 on success / -EIO on failure
  **/
 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
 {
@@ -6905,6 +7149,7 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
 
 	ENTER;
 	free_irq(pdev->irq, ioa_cfg);
+	pci_disable_msi(pdev);
 	iounmap(ioa_cfg->hdw_dma_regs);
 	pci_release_regions(pdev);
 	ipr_free_mem(ioa_cfg);
@@ -7099,8 +7344,6 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
 	ioa_cfg->pdev = pdev;
 	ioa_cfg->log_level = ipr_log_level;
 	ioa_cfg->doorbell = IPR_DOORBELL;
-	if (!ipr_auto_create)
-		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
 	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
@@ -7183,7 +7426,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	unsigned long ipr_regs_pci;
 	void __iomem *ipr_regs;
 	int rc = PCIBIOS_SUCCESSFUL;
-	volatile u32 mask, uproc;
+	volatile u32 mask, uproc, interrupts;
 
 	ENTER;
 
@@ -7192,6 +7435,11 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 		goto out;
 	}
 
+	if (!(rc = pci_enable_msi(pdev)))
+		dev_info(&pdev->dev, "MSI enabled\n");
+	else if (ipr_debug)
+		dev_info(&pdev->dev, "Cannot enable MSI\n");
+
 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
 
 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
@@ -7215,6 +7463,15 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 		goto out_scsi_host_put;
 	}
 
+	if (ipr_transop_timeout)
+		ioa_cfg->transop_timeout = ipr_transop_timeout;
+	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
+		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
+	else
+		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
+
+	ioa_cfg->revid = pdev->revision;
+
 	ipr_regs_pci = pci_resource_start(pdev, 0);
 
 	rc = pci_request_regions(pdev, IPR_NAME);
@@ -7224,7 +7481,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 		goto out_scsi_host_put;
 	}
 
-	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
+	ipr_regs = pci_ioremap_bar(pdev, 0);
 
 	if (!ipr_regs) {
 		dev_err(&pdev->dev,
@@ -7241,7 +7498,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
-	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (rc < 0) {
 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
 		goto cleanup_nomem;
@@ -7283,9 +7540,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	 * the card is in an unknown state and needs a hard reset
 	 */
 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
 		ioa_cfg->needs_hard_reset = 1;
+	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
+		ioa_cfg->needs_hard_reset = 1;
+	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
+		ioa_cfg->ioa_unit_checked = 1;
 
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
 	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
@@ -7296,6 +7558,13 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 		goto cleanup_nolog;
 	}
 
+	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
+	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
+		ioa_cfg->needs_warm_reset = 1;
+		ioa_cfg->reset = ipr_reset_slot_reset;
+	} else
+		ioa_cfg->reset = ipr_reset_start_bist;
+
 	spin_lock(&ipr_driver_lock);
 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
 	spin_unlock(&ipr_driver_lock);
@@ -7313,6 +7582,7 @@ out_release_regions:
 out_scsi_host_put:
 	scsi_host_put(host);
 out_disable:
+	pci_disable_msi(pdev);
 	pci_disable_device(pdev);
 	goto out;
 }
@@ -7378,6 +7648,12 @@ static void __ipr_remove(struct pci_dev *pdev)
 	ENTER;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+	while(ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+	}
+
 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
@@ -7413,9 +7689,9 @@ static void ipr_remove(struct pci_dev *pdev)
 
 	ENTER;
 
-	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
+	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
 			      &ipr_trace_attr);
-	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
+	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
 			     &ipr_dump_attr);
 	scsi_remove_host(ioa_cfg->host);
 
@@ -7456,7 +7732,7 @@ static int __devinit ipr_probe(struct pci_dev *pdev,
 		return rc;
 	}
 
-	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
+	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
 				   &ipr_trace_attr);
 
 	if (rc) {
@@ -7465,11 +7741,11 @@ static int __devinit ipr_probe(struct pci_dev *pdev,
 		return rc;
 	}
 
-	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
+	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
 				  &ipr_dump_attr);
 
 	if (rc) {
-		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
+		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
 				      &ipr_trace_attr);
 		scsi_remove_host(ioa_cfg->host);
 		__ipr_remove(pdev);
@@ -7501,6 +7777,12 @@ static void ipr_shutdown(struct pci_dev *pdev)
 	unsigned long lock_flags = 0;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	while(ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	}
+
 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@@ -7522,31 +7804,48 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT},
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);