2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/ioport.h>
63 #include <linux/delay.h>
64 #include <linux/pci.h>
65 #include <linux/wait.h>
66 #include <linux/spinlock.h>
67 #include <linux/sched.h>
68 #include <linux/interrupt.h>
69 #include <linux/blkdev.h>
70 #include <linux/firmware.h>
71 #include <linux/module.h>
72 #include <linux/moduleparam.h>
73 #include <linux/libata.h>
74 #include <linux/hdreg.h>
75 #include <linux/stringify.h>
78 #include <asm/processor.h>
79 #include <scsi/scsi.h>
80 #include <scsi/scsi_host.h>
81 #include <scsi/scsi_tcq.h>
82 #include <scsi/scsi_eh.h>
83 #include <scsi/scsi_cmnd.h>
/*
 * Driver-wide state.  ipr_ioa_head anchors the list of adapter instances;
 * the remaining statics hold the defaults for the module parameters that
 * are registered below via module_param_named().
 */
89 static LIST_HEAD(ipr_ioa_head);
90 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
91 static unsigned int ipr_max_speed = 1;
92 static int ipr_testmode = 0;
93 static unsigned int ipr_fastfail = 0;
94 static unsigned int ipr_transop_timeout = 0;
95 static unsigned int ipr_enable_cache = 1;
96 static unsigned int ipr_debug = 0;
97 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
98 static unsigned int ipr_dual_ioa_raid = 1;
/* Spinlock taken by driver-global paths; users are outside this extract. */
99 static DEFINE_SPINLOCK(ipr_driver_lock);
101 /* This table describes the differences between DMA controller chips */
/*
 * Per-chip-family register offsets (interrupt mask/sense/clear, IOARRIN,
 * microprocessor interrupt registers) and PCI cache line size.  Entries
 * are referenced by index from ipr_chip[] below.
 */
102 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
103 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
105 .cache_line_size = 0x20,
107 .set_interrupt_mask_reg = 0x0022C,
108 .clr_interrupt_mask_reg = 0x00230,
109 .sense_interrupt_mask_reg = 0x0022C,
110 .clr_interrupt_reg = 0x00228,
111 .sense_interrupt_reg = 0x00224,
112 .ioarrin_reg = 0x00404,
113 .sense_uproc_interrupt_reg = 0x00214,
114 .set_uproc_interrupt_reg = 0x00214,
115 .clr_uproc_interrupt_reg = 0x00218
118 { /* Snipe and Scamp */
120 .cache_line_size = 0x20,
122 .set_interrupt_mask_reg = 0x00288,
123 .clr_interrupt_mask_reg = 0x0028C,
124 .sense_interrupt_mask_reg = 0x00288,
125 .clr_interrupt_reg = 0x00284,
126 .sense_interrupt_reg = 0x00280,
127 .ioarrin_reg = 0x00504,
128 .sense_uproc_interrupt_reg = 0x00290,
129 .set_uproc_interrupt_reg = 0x00290,
130 .clr_uproc_interrupt_reg = 0x00294
/*
 * NOTE(review): the opening "{ ... }" header line of this third entry is
 * not visible in this extract; it additionally carries dump address/data
 * register offsets the first two entries lack.
 */
135 .cache_line_size = 0x20,
137 .set_interrupt_mask_reg = 0x00010,
138 .clr_interrupt_mask_reg = 0x00018,
139 .sense_interrupt_mask_reg = 0x00010,
140 .clr_interrupt_reg = 0x00008,
141 .sense_interrupt_reg = 0x00000,
142 .ioarrin_reg = 0x00070,
143 .sense_uproc_interrupt_reg = 0x00020,
144 .set_uproc_interrupt_reg = 0x00020,
145 .clr_uproc_interrupt_reg = 0x00028,
146 .dump_addr_reg = 0x00064,
147 .dump_data_reg = 0x00068
/*
 * PCI vendor/device ID -> chip personality map: interrupt delivery mode
 * (IPR_USE_LSI vs IPR_USE_MSI), SIS interface width, and a pointer into
 * ipr_chip_cfg[] for the register layout.
 */
152 static const struct ipr_chip_t ipr_chip[] = {
153 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
154 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
155 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
156 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
157 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
158 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
159 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }
/* Indexed by the max_speed module parameter (0-2): 80 MB/s, U160, U320. */
162 static int ipr_max_bus_speeds [] = {
163 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/*
 * Module metadata and parameters.  Parameters registered with permission 0
 * are load-time only; fastfail and debug use S_IRUGO | S_IWUSR and are
 * therefore tunable at runtime through sysfs.
 */
166 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
167 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
168 module_param_named(max_speed, ipr_max_speed, uint, 0);
169 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
170 module_param_named(log_level, ipr_log_level, uint, 0);
171 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
172 module_param_named(testmode, ipr_testmode, int, 0);
173 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
174 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
175 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
176 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
177 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
178 module_param_named(enable_cache, ipr_enable_cache, int, 0);
179 MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
180 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
181 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
182 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
183 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
184 module_param_named(max_devs, ipr_max_devs, int, 0);
185 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
186 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
187 MODULE_LICENSE("GPL");
188 MODULE_VERSION(IPR_DRIVER_VERSION);
190 /* A constant array of IOASCs/URCs/Error Messages */
/*
 * Each entry: {IOASC value, flag, log level threshold, message text}.
 * NOTE(review): the exact meaning of the second field is declared in
 * struct ipr_error_table_t, which is not visible in this extract —
 * confirm against the header.  Many entries' "{IOASC, ...}" header
 * lines are also missing from this extract (only the message text
 * remains for those).
 */
192 struct ipr_error_table_t ipr_error_table[] = {
193 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
194 "8155: An unknown error was received"},
196 "Soft underlength error"},
198 "Command to be cancelled not found"},
200 "Qualified success"},
201 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
202 "FFFE: Soft device bus error recovered by the IOA"},
203 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
204 "4101: Soft device bus fabric error"},
205 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
206 "FFF9: Device sector reassign successful"},
207 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
208 "FFF7: Media error recovered by device rewrite procedures"},
209 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
210 "7001: IOA sector reassignment successful"},
211 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
212 "FFF9: Soft media error. Sector reassignment recommended"},
213 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
214 "FFF7: Media error recovered by IOA rewrite procedures"},
215 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
216 "FF3D: Soft PCI bus error recovered by the IOA"},
217 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
218 "FFF6: Device hardware error recovered by the IOA"},
219 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
220 "FFF6: Device hardware error recovered by the device"},
221 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
222 "FF3D: Soft IOA error recovered by the IOA"},
223 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
224 "FFFA: Undefined device response recovered by the IOA"},
225 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
226 "FFF6: Device bus error, message or command phase"},
227 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
228 "FFFE: Task Management Function failed"},
229 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
230 "FFF6: Failure prediction threshold exceeded"},
231 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
232 "8009: Impending cache battery pack failure"},
234 "34FF: Disk device format in progress"},
235 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
236 "9070: IOA requested reset"},
238 "Synchronization required"},
240 "No ready, IOA shutdown"},
242 "Not ready, IOA has been shutdown"},
243 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
244 "3020: Storage subsystem configuration error"},
246 "FFF5: Medium error, data unreadable, recommend reassign"},
248 "7000: Medium error, data unreadable, do not reassign"},
249 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
250 "FFF3: Disk media format bad"},
251 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
252 "3002: Addressed device failed to respond to selection"},
253 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
254 "3100: Device bus error"},
255 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
256 "3109: IOA timed out a device command"},
258 "3120: SCSI bus is not operational"},
259 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
260 "4100: Hard device bus fabric error"},
261 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
262 "9000: IOA reserved area data check"},
263 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
264 "9001: IOA reserved area invalid data pattern"},
265 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
266 "9002: IOA reserved area LRC error"},
267 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
268 "102E: Out of alternate sectors for disk storage"},
269 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
270 "FFF4: Data transfer underlength error"},
271 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
272 "FFF4: Data transfer overlength error"},
273 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
274 "3400: Logical unit failure"},
275 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
276 "FFF4: Device microcode is corrupt"},
277 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
278 "8150: PCI bus error"},
280 "Unsupported device bus message received"},
281 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
282 "FFF4: Disk device problem"},
283 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
284 "8150: Permanent IOA failure"},
285 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
286 "3010: Disk device returned wrong response to IOA"},
287 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
288 "8151: IOA microcode error"},
290 "Device bus status error"},
291 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
292 "8157: IOA error requiring IOA reset to recover"},
294 "ATA device status error"},
296 "Message reject received from the device"},
297 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
298 "8008: A permanent cache battery pack failure occurred"},
299 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
300 "9090: Disk unit has been modified after the last known status"},
301 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
302 "9081: IOA detected device error"},
303 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
304 "9082: IOA detected device error"},
305 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
306 "3110: Device bus error, message or command phase"},
307 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
308 "3110: SAS Command / Task Management Function failed"},
309 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
310 "9091: Incorrect hardware configuration change has been detected"},
311 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
312 "9073: Invalid multi-adapter configuration"},
313 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
314 "4010: Incorrect connection between cascaded expanders"},
315 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
316 "4020: Connections exceed IOA design limits"},
317 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
318 "4030: Incorrect multipath connection"},
319 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
320 "4110: Unsupported enclosure function"},
321 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
322 "FFF4: Command to logical unit failed"},
324 "Illegal request, invalid request type or request packet"},
326 "Illegal request, invalid resource handle"},
328 "Illegal request, commands not allowed to this device"},
330 "Illegal request, command not allowed to a secondary adapter"},
332 "Illegal request, invalid field in parameter list"},
334 "Illegal request, parameter not supported"},
336 "Illegal request, parameter value invalid"},
338 "Illegal request, command sequence error"},
340 "Illegal request, dual adapter support not enabled"},
341 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
342 "9031: Array protection temporarily suspended, protection resuming"},
343 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
344 "9040: Array protection temporarily suspended, protection resuming"},
345 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
346 "3140: Device bus not ready to ready transition"},
347 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
348 "FFFB: SCSI bus was reset"},
350 "FFFE: SCSI bus transition to single ended"},
352 "FFFE: SCSI bus transition to LVD"},
353 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
354 "FFFB: SCSI bus was reset by another initiator"},
355 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
356 "3029: A device replacement has occurred"},
357 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
358 "9051: IOA cache data exists for a missing or failed device"},
359 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
360 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
361 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
362 "9025: Disk unit is not supported at its physical location"},
363 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
364 "3020: IOA detected a SCSI bus configuration error"},
365 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
366 "3150: SCSI bus configuration error"},
367 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
368 "9074: Asymmetric advanced function disk configuration"},
369 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
370 "4040: Incomplete multipath connection between IOA and enclosure"},
371 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
372 "4041: Incomplete multipath connection between enclosure and device"},
373 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
374 "9075: Incomplete multipath connection between IOA and remote IOA"},
375 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
376 "9076: Configuration error, missing remote IOA"},
377 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
378 "4050: Enclosure does not support a required multipath function"},
379 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
380 "4070: Logically bad block written on device"},
381 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
382 "9041: Array protection temporarily suspended"},
383 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
384 "9042: Corrupt array parity detected on specified device"},
385 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
386 "9030: Array no longer protected due to missing or failed disk unit"},
387 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
388 "9071: Link operational transition"},
389 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
390 "9072: Link not operational transition"},
391 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
392 "9032: Array exposed but still protected"},
393 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
394 "70DD: Device forced failed by disrupt device command"},
395 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
396 "4061: Multipath redundancy level got better"},
397 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
398 "4060: Multipath redundancy level got worse"},
400 "Failure due to other device"},
401 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
402 "9008: IOA does not support functions expected by devices"},
403 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
404 "9010: Cache data associated with attached devices cannot be found"},
405 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
406 "9011: Cache data belongs to devices other than those attached"},
407 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
408 "9020: Array missing 2 or more devices with only 1 device present"},
409 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
410 "9021: Array missing 2 or more devices with 2 or more devices present"},
411 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
412 "9022: Exposed array is missing a required device"},
413 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
414 "9023: Array member(s) not at required physical locations"},
415 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
416 "9024: Array not functional due to present hardware configuration"},
417 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
418 "9026: Array not functional due to present hardware configuration"},
419 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
420 "9027: Array is missing a device and parity is out of sync"},
421 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
422 "9028: Maximum number of arrays already exist"},
423 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
424 "9050: Required cache data cannot be located for a disk unit"},
425 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
426 "9052: Cache data exists for a device that has been modified"},
427 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
428 "9054: IOA resources not available due to previous problems"},
429 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
430 "9092: Disk unit requires initialization before use"},
431 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
432 "9029: Incorrect hardware configuration change has been detected"},
433 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
434 "9060: One or more disk pairs are missing from an array"},
435 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
436 "9061: One or more disks are missing from an array"},
437 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
438 "9062: One or more disks are missing from an array"},
439 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
440 "9063: Maximum number of functional arrays has been exceeded"},
442 "Aborted command, invalid descriptor"},
444 "Command terminated by host"}
/*
 * Known SCSI enclosure (SES) product IDs mapped to their maximum bus
 * speed in MB/s.  The second field appears to be a per-character compare
 * mask ('X' vs '*') applied against the inquiry product ID — confirm
 * against the code that consumes ipr_ses_table (not in this extract).
 */
447 static const struct ipr_ses_table_entry ipr_ses_table[] = {
448 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
449 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
450 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
451 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
452 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
453 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
454 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
455 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
456 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
457 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
458 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
459 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
460 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
464 * Function Prototypes
/* Forward declarations for routines defined later in this file. */
466 static int ipr_reset_alert(struct ipr_cmnd *);
467 static void ipr_process_ccn(struct ipr_cmnd *);
468 static void ipr_process_error(struct ipr_cmnd *);
469 static void ipr_reset_ioa_job(struct ipr_cmnd *);
470 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
471 enum ipr_shutdown_type);
473 #ifdef CONFIG_SCSI_IPR_TRACE
475 * ipr_trc_hook - Add a trace entry to the driver trace
476 * @ipr_cmd: ipr command struct
478 * @add_data: additional data
483 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
484 u8 type, u32 add_data)
486 struct ipr_trace_entry *trace_entry;
487 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Claim the next slot in the adapter's trace buffer. */
489 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
490 trace_entry->time = jiffies;
491 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
492 trace_entry->type = type;
/*
 * The ATA opcode lives in a different spot for 64-bit SIS adapters.
 * NOTE(review): the "else" line between these two assignments is not
 * visible in this extract.
 */
493 if (ipr_cmd->ioa_cfg->sis64)
494 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
496 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
497 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
498 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
499 trace_entry->u.add_data = add_data;
/* When tracing is compiled out, the hook collapses to a no-op. */
502 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
506 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
507 * @ipr_cmd: ipr command struct
512 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
514 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
515 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
516 dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Wipe the command packet and zero all transfer-length bookkeeping. */
518 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
519 ioarcb->data_transfer_length = 0;
520 ioarcb->read_data_transfer_length = 0;
521 ioarcb->ioadl_len = 0;
522 ioarcb->read_ioadl_len = 0;
/*
 * Point the IOARCB back at this command's embedded IOADL; sis64 uses a
 * single 64-bit address, 32-bit SIS uses separate read/write addresses.
 * NOTE(review): the "else" line between the branches is not visible in
 * this extract.
 */
524 if (ipr_cmd->ioa_cfg->sis64)
525 ioarcb->u.sis64_addr_data.data_ioadl_addr =
526 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
528 ioarcb->write_ioadl_addr =
529 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
530 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
/* Clear stale status from any previous use of this command block. */
534 ioasa->residual_data_len = 0;
535 ioasa->u.gata.status = 0;
537 ipr_cmd->scsi_cmd = NULL;
539 ipr_cmd->sense_buffer[0] = 0;
540 ipr_cmd->dma_use_sg = 0;
544 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
545 * @ipr_cmd: ipr command struct
550 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
/* Full first-use init: reinit shared fields, then clear scratch state
 * and prepare the per-command timer. */
552 ipr_reinit_ipr_cmnd(ipr_cmd);
553 ipr_cmd->u.scratch = 0;
554 ipr_cmd->sibling = NULL;
555 init_timer(&ipr_cmd->timer);
559 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
560 * @ioa_cfg: ioa config struct
563 * pointer to ipr command struct
566 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
568 struct ipr_cmnd *ipr_cmd;
/* Pop the head of the free queue and initialize it for the caller.
 * NOTE(review): assumes free_q is non-empty; no emptiness check is
 * visible here.  The return statement is not visible in this extract. */
570 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
571 list_del(&ipr_cmd->queue);
572 ipr_init_ipr_cmnd(ipr_cmd);
578 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
579 * @ioa_cfg: ioa config struct
580 * @clr_ints: interrupts to clear
582 * This function masks all interrupts on the adapter, then clears the
583 * interrupts specified in the mask
588 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
591 volatile u32 int_reg;
593 /* Stop new interrupts */
594 ioa_cfg->allow_interrupts = 0;
596 /* Set interrupt mask to stop all new interrupts */
597 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
599 /* Clear any pending interrupts */
600 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
/* Read back to flush the posted writes before returning. */
601 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
605 * ipr_save_pcix_cmd_reg - Save PCI-X command register
606 * @ioa_cfg: ioa config struct
609 * 0 on success / -EIO on failure
611 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
613 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
/* No PCI-X capability: nothing to save (return elided in this extract). */
615 if (pcix_cmd_reg == 0)
618 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
619 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
620 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
/* Force data parity error recovery and relaxed ordering in the saved copy. */
624 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
629 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
630 * @ioa_cfg: ioa config struct
633 * 0 on success / -EIO on failure
635 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
637 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
/* Restore the value captured by ipr_save_pcix_cmd_reg(). */
640 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
641 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
642 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
651 * ipr_sata_eh_done - done function for aborted SATA commands
652 * @ipr_cmd: ipr command struct
654 * This function is invoked for ops generated to SATA
655 * devices which are being aborted.
660 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
662 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
663 struct ata_queued_cmd *qc = ipr_cmd->qc;
664 struct ipr_sata_port *sata_port = qc->ap->private_data;
/* Flag the qc as errored and mark the port busy, then recycle the
 * command block onto the free queue. */
666 qc->err_mask |= AC_ERR_OTHER;
667 sata_port->ioasa.status |= ATA_BUSY;
668 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
673 * ipr_scsi_eh_done - mid-layer done function for aborted ops
674 * @ipr_cmd: ipr command struct
676 * This function is invoked by the interrupt handler for
677 * ops generated by the SCSI mid-layer which are being aborted.
682 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
684 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
685 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
/* Report DID_ERROR to the mid-layer, unmap DMA, complete the command,
 * and return the ipr command block to the free queue. */
687 scsi_cmd->result |= (DID_ERROR << 16);
689 scsi_dma_unmap(ipr_cmd->scsi_cmd);
690 scsi_cmd->scsi_done(scsi_cmd);
691 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
695 * ipr_fail_all_ops - Fails all outstanding ops.
696 * @ioa_cfg: ioa config struct
698 * This function fails all outstanding ops.
703 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
705 struct ipr_cmnd *ipr_cmd, *temp;
/* Walk the pending queue with the _safe iterator since each entry is
 * removed as it is failed. */
708 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
709 list_del(&ipr_cmd->queue);
/* Stamp every op with "IOA was reset" so completion paths see why. */
711 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
712 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
/* Route completion through the matching error-handler done routine. */
714 if (ipr_cmd->scsi_cmd)
715 ipr_cmd->done = ipr_scsi_eh_done;
716 else if (ipr_cmd->qc)
717 ipr_cmd->done = ipr_sata_eh_done;
719 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
720 del_timer(&ipr_cmd->timer);
721 ipr_cmd->done(ipr_cmd);
728 * ipr_send_command - Send driver initiated requests.
729 * @ipr_cmd: ipr command struct
731 * This function sends a command to the adapter using the correct write call.
732 * In the case of sis64, calculate the ioarcb size required. Then or in the
738 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
740 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
741 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
743 if (ioa_cfg->sis64) {
744 /* The default size is 256 bytes */
745 send_dma_addr |= 0x1;
747 /* If the number of ioadls * size of ioadl > 128 bytes,
748 then use a 512 byte ioarcb */
749 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
750 send_dma_addr |= 0x4;
/* sis64 posts a 64-bit address; 32-bit SIS posts a 32-bit one.
 * NOTE(review): the "} else" line between the writes is not visible
 * in this extract. */
751 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
753 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
757 * ipr_do_req - Send driver initiated requests.
758 * @ipr_cmd: ipr command struct
759 * @done: done function
760 * @timeout_func: timeout function
761 * @timeout: timeout value
763 * This function sends the specified command to the adapter with the
764 * timeout given. The done function is invoked on command completion.
769 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
770 void (*done) (struct ipr_cmnd *),
771 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
773 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Queue on pending_q first so completion/trace paths can find it. */
775 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
777 ipr_cmd->done = done;
/* Arm the per-command timeout before handing the op to the adapter. */
779 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
780 ipr_cmd->timer.expires = jiffies + timeout;
781 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
783 add_timer(&ipr_cmd->timer);
785 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
789 ipr_send_command(ipr_cmd);
793 * ipr_internal_cmd_done - Op done function for an internally generated op.
794 * @ipr_cmd: ipr command struct
796 * This function is the op done function for an internally generated,
797 * blocking op. It simply wakes the sleeping thread.
802 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
/* NOTE(review): an "else" line between these two statements is not
 * visible in this extract; as written upstream, completion is signalled
 * only when no sibling command remains outstanding. */
804 if (ipr_cmd->sibling)
805 ipr_cmd->sibling = NULL;
807 complete(&ipr_cmd->completion);
811 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
812 * @ipr_cmd: ipr command struct
813 * @dma_addr: dma address
814 * @len: transfer length
815 * @flags: ioadl flag value
817 * This function initializes an ioadl in the case where there is only a single
823 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
826 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
827 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
/* Single-element scatter list for both SIS formats. */
829 ipr_cmd->dma_use_sg = 1;
831 if (ipr_cmd->ioa_cfg->sis64) {
/* 64-bit SIS: flags/len/address are separate big-endian fields. */
832 ioadl64->flags = cpu_to_be32(flags);
833 ioadl64->data_len = cpu_to_be32(len);
834 ioadl64->address = cpu_to_be64(dma_addr);
836 ipr_cmd->ioarcb.ioadl_len =
837 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
838 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
/* 32-bit SIS: flags and length share one word. */
840 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
841 ioadl->address = cpu_to_be32(dma_addr);
/* Reads and writes use distinct length/ioadl-len fields in the IOARCB. */
843 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
844 ipr_cmd->ioarcb.read_ioadl_len =
845 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
846 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
848 ipr_cmd->ioarcb.ioadl_len =
849 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
850 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
856 * ipr_send_blocking_cmd - Send command and sleep on its completion.
857 * @ipr_cmd: ipr command struct
858 * @timeout_func: function to invoke if command times out
864 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
865 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
868 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
870 init_completion(&ipr_cmd->completion);
871 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
/* Caller holds host_lock with irqs off; drop it while sleeping on the
 * completion, then reacquire before returning. */
873 spin_unlock_irq(ioa_cfg->host->host_lock);
874 wait_for_completion(&ipr_cmd->completion);
875 spin_lock_irq(ioa_cfg->host->host_lock);
879 * ipr_send_hcam - Send an HCAM to the adapter.
880 * @ioa_cfg: ioa config struct
882 * @hostrcb: hostrcb struct
884 * This function will send a Host Controlled Async command to the adapter.
885 * If HCAMs are currently not allowed to be issued to the adapter, it will
886 * place the hostrcb on the free queue.
891 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
892 struct ipr_hostrcb *hostrcb)
894 struct ipr_cmnd *ipr_cmd;
895 struct ipr_ioarcb *ioarcb;
897 if (ioa_cfg->allow_cmds) {
898 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
899 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
900 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
902 ipr_cmd->u.hostrcb = hostrcb;
903 ioarcb = &ipr_cmd->ioarcb;
/* Build the HCAM CDB: opcode, HCAM type, and hcam buffer size in
 * bytes 7-8 (big-endian split). */
905 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
906 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
907 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
908 ioarcb->cmd_pkt.cdb[1] = type;
909 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
910 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
912 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
913 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
/* Completion dispatch depends on the HCAM type. */
915 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
916 ipr_cmd->done = ipr_process_ccn;
918 ipr_cmd->done = ipr_process_error;
920 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
924 ipr_send_command(ipr_cmd);
/* Commands not allowed: park the hostrcb on the free queue instead. */
926 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
931 * ipr_update_ata_class - Update the ata class in the resource entry
932 * @res: resource entry struct
933 * @proto: cfgte device bus protocol value
938 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
/* Map the bus protocol to libata's device class.
 * NOTE(review): the "switch (proto)" line, break statements, and
 * default label are not visible in this extract. */
942 case IPR_PROTO_SAS_STP:
943 res->ata_class = ATA_DEV_ATA;
945 case IPR_PROTO_SATA_ATAPI:
946 case IPR_PROTO_SAS_STP_ATAPI:
947 res->ata_class = ATA_DEV_ATAPI;
950 res->ata_class = ATA_DEV_UNKNOWN;
956 * ipr_init_res_entry - Initialize a resource entry struct.
957 * @res: resource entry struct
958 * @cfgtew: config table entry wrapper struct
963 static void ipr_init_res_entry(struct ipr_resource_entry *res,
964 struct ipr_config_table_entry_wrapper *cfgtew)
968 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
969 struct ipr_resource_entry *gscsi_res = NULL;
/* Reset per-resource bookkeeping flags. */
971 res->needs_sync_complete = 0;
974 res->del_from_ml = 0;
975 res->resetting_device = 0;
977 res->sata_port = NULL;
979 if (ioa_cfg->sis64) {
/* sis64 config table: protocol, flags, queueing model and type come
 * from the 64-bit entry format. */
980 proto = cfgtew->u.cfgte64->proto;
981 res->res_flags = cfgtew->u.cfgte64->res_flags;
982 res->qmodel = IPR_QUEUEING_MODEL64(res);
983 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
985 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
986 sizeof(res->res_path));
989 res->lun = scsilun_to_int(&res->dev_lun);
991 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
/* Reuse the target id of an existing resource with the same device
 * id; otherwise allocate a fresh bit from target_ids. */
992 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
993 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
995 res->target = gscsi_res->target;
1000 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1001 ioa_cfg->max_devs_supported);
1002 set_bit(res->target, ioa_cfg->target_ids);
1005 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1006 sizeof(res->dev_lun.scsi_lun));
1007 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1008 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1010 } else if (res->type == IPR_RES_TYPE_ARRAY) {
/* Arrays and volume sets live on virtual buses with their own id maps. */
1011 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1012 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1013 ioa_cfg->max_devs_supported);
1014 set_bit(res->target, ioa_cfg->array_ids);
1015 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1016 res->bus = IPR_VSET_VIRTUAL_BUS;
1017 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1018 ioa_cfg->max_devs_supported);
1019 set_bit(res->target, ioa_cfg->vset_ids);
1021 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1022 ioa_cfg->max_devs_supported);
1023 set_bit(res->target, ioa_cfg->target_ids);
/* Legacy (32-bit SIS) config table: address fields come straight from
 * the entry's res_addr. */
1026 proto = cfgtew->u.cfgte->proto;
1027 res->qmodel = IPR_QUEUEING_MODEL(res);
1028 res->flags = cfgtew->u.cfgte->flags;
1029 if (res->flags & IPR_IS_IOA_RESOURCE)
1030 res->type = IPR_RES_TYPE_IOAFP;
1032 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1034 res->bus = cfgtew->u.cfgte->res_addr.bus;
1035 res->target = cfgtew->u.cfgte->res_addr.target;
1036 res->lun = cfgtew->u.cfgte->res_addr.lun;
1039 ipr_update_ata_class(res, proto);
1043 * ipr_is_same_device - Determine if two devices are the same.
1044 * @res: resource entry struct
1045 * @cfgtew: config table entry wrapper struct
1048 * 1 if the devices are the same / 0 otherwise
/*
 * Identity test between an existing resource entry and a new config
 * table entry: sis64 compares (dev_id, lun); legacy compares the full
 * bus/target/lun resource address.
 * NOTE(review): the return statements are missing from this copy of
 * the file — verify against the upstream source.
 */
1050 static int ipr_is_same_device(struct ipr_resource_entry *res,
1051 struct ipr_config_table_entry_wrapper *cfgtew)
1053 if (res->ioa_cfg->sis64) {
1054 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1055 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1056 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1057 sizeof(cfgtew->u.cfgte64->lun))) {
1061 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1062 res->target == cfgtew->u.cfgte->res_addr.target &&
1063 res->lun == cfgtew->u.cfgte->res_addr.lun)
1071 * ipr_format_resource_path - Format the resource path for printing.
1072 * @res_path: resource path
1078 static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1082 sprintf(buffer, "%02X", res_path[0]);
1083 for (i=1; res_path[i] != 0xff; i++)
1084 sprintf(buffer, "%s-%02X", buffer, res_path[i]);
1090 * ipr_update_res_entry - Update the resource entry.
1091 * @res: resource entry struct
1092 * @cfgtew: config table entry wrapper struct
/*
 * Refreshes an existing resource entry from a new config table entry
 * (as opposed to ipr_init_res_entry(), which fills a fresh one).
 * On sis64, also detects a changed resource path and logs the new path
 * against the attached scsi_device.
 */
1097 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1098 struct ipr_config_table_entry_wrapper *cfgtew)
1100 char buffer[IPR_MAX_RES_PATH_LENGTH];
1104 if (res->ioa_cfg->sis64) {
1105 res->flags = cfgtew->u.cfgte64->flags;
1106 res->res_flags = cfgtew->u.cfgte64->res_flags;
1107 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1109 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1110 sizeof(struct ipr_std_inq_data));
1112 res->qmodel = IPR_QUEUEING_MODEL64(res);
1113 proto = cfgtew->u.cfgte64->proto;
1114 res->res_handle = cfgtew->u.cfgte64->res_handle;
1115 res->dev_id = cfgtew->u.cfgte64->dev_id;
1117 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1118 sizeof(res->dev_lun.scsi_lun));
/* Copy the new resource path only when it actually differs, so the
 * change can be reported below (new_path flag is set in the dropped
 * branch body — see upstream). */
1120 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1121 sizeof(res->res_path))) {
1122 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1123 sizeof(res->res_path));
1127 if (res->sdev && new_path)
1128 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1129 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
/* Legacy (non-sis64) entry layout. */
1131 res->flags = cfgtew->u.cfgte->flags;
1132 if (res->flags & IPR_IS_IOA_RESOURCE)
1133 res->type = IPR_RES_TYPE_IOAFP;
1135 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1137 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1138 sizeof(struct ipr_std_inq_data));
1140 res->qmodel = IPR_QUEUEING_MODEL(res);
1141 proto = cfgtew->u.cfgte->proto;
1142 res->res_handle = cfgtew->u.cfgte->res_handle;
1145 ipr_update_ata_class(res, proto);
1149 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1151 * @res: resource entry struct
1152 * @cfgtew: config table entry wrapper struct
/*
 * Releases the target id allocated by ipr_init_res_entry().  sis64
 * only (legacy adapters do not use the id bitmaps).  The bitmap is
 * chosen by the same bus/type rules used at allocation time; a generic
 * SCSI target id is only freed once no other resource shares its
 * dev_id.
 */
1157 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1159 struct ipr_resource_entry *gscsi_res = NULL;
1160 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1162 if (!ioa_cfg->sis64)
1165 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1166 clear_bit(res->target, ioa_cfg->array_ids);
1167 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1168 clear_bit(res->target, ioa_cfg->vset_ids);
1169 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
/* If another resource still uses this dev_id, keep the target bit. */
1170 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1171 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1173 clear_bit(res->target, ioa_cfg->target_ids);
1175 } else if (res->bus == 0)
1176 clear_bit(res->target, ioa_cfg->target_ids);
1180 * ipr_handle_config_change - Handle a config change from the adapter
1181 * @ioa_cfg: ioa config struct
/*
 * Processes a Configuration Change Notification HCAM: looks up (or
 * allocates) the resource entry named by the notification's resource
 * handle, updates or removes it, and re-arms the CCN by sending the
 * hostrcb back to the adapter.
 * Caller context: HCAM completion path; assumes the host lock
 * conventions of the surrounding driver — TODO confirm.
 */
1187 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1188 struct ipr_hostrcb *hostrcb)
1190 struct ipr_resource_entry *res = NULL;
1191 struct ipr_config_table_entry_wrapper cfgtew;
1192 __be32 cc_res_handle;
1196 if (ioa_cfg->sis64) {
1197 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1198 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1200 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1201 cc_res_handle = cfgtew.u.cfgte->res_handle;
/* Find the existing resource entry for this handle, if any. */
1204 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1205 if (res->res_handle == cc_res_handle) {
/* Not found: take an entry off the free list; if none remain, just
 * re-arm the HCAM and drop the notification. */
1212 if (list_empty(&ioa_cfg->free_res_q)) {
1213 ipr_send_hcam(ioa_cfg,
1214 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1219 res = list_entry(ioa_cfg->free_res_q.next,
1220 struct ipr_resource_entry, queue);
1222 list_del(&res->queue);
1223 ipr_init_res_entry(res, &cfgtew);
1224 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1227 ipr_update_res_entry(res, &cfgtew);
1229 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
/* Device removed: either defer to the midlayer-removal worker (when a
 * scsi_device is attached) or recycle the entry immediately. */
1231 res->del_from_ml = 1;
1232 res->res_handle = IPR_INVALID_RES_HANDLE;
1233 if (ioa_cfg->allow_ml_add_del)
1234 schedule_work(&ioa_cfg->work_q);
1236 ipr_clear_res_target(res);
1237 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1239 } else if (!res->sdev) {
1241 if (ioa_cfg->allow_ml_add_del)
1242 schedule_work(&ioa_cfg->work_q);
/* Re-arm the config change notification. */
1245 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1249 * ipr_process_ccn - Op done function for a CCN.
1250 * @ipr_cmd: ipr command struct
1252 * This function is the op done function for a configuration
1253 * change notification host controlled async from the adapter.
/*
 * Completion handler for the Query-HCAM command that fetched a CCN:
 * releases the command block, then either logs the failure and re-arms
 * the HCAM (error path) or hands the buffer to
 * ipr_handle_config_change() for processing.
 */
1258 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1260 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1261 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1262 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1264 list_del(&hostrcb->queue);
1265 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* IOA_WAS_RESET is expected during adapter reset; anything else is a
 * genuine HCAM failure worth logging. */
1268 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1269 dev_err(&ioa_cfg->pdev->dev,
1270 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1272 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1274 ipr_handle_config_change(ioa_cfg, hostrcb);
1279 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1280 * @i: index into buffer
1281 * @buf: string to modify
1283 * This function will strip all trailing whitespace, pad the end
1284 * of the string with a single space, and NULL terminate the string.
1287 * new length of string
/*
 * Helper for ipr_log_vpd_compact(): walks backwards from index @i past
 * trailing spaces.  NOTE(review): the pad/terminate statements and the
 * return are missing from this copy of the file — confirm against the
 * upstream source.
 */
1289 static int strip_and_pad_whitespace(int i, char *buf)
1291 while (i && buf[i] == ' ')
1299 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1300 * @prefix: string to print at start of printk
1301 * @hostrcb: hostrcb pointer
1302 * @vpd: vendor/product id/sn struct
/*
 * Logs vendor id, product id and serial number on a single line,
 * collapsing trailing whitespace between the fields.  The buffer is
 * sized for all three fields plus separators and the terminator.
 */
1307 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1308 struct ipr_vpd *vpd)
1310 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1313 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1314 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1316 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1317 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
/* Serial number is fixed-width; terminate after it (VPD fields are not
 * NUL-terminated in the hostrcb). */
1319 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1320 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1322 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1326 * ipr_log_vpd - Log the passed VPD to the error log.
1327 * @vpd: vendor/product id/sn struct
/*
 * Logs vendor/product id on one line and serial number on another.
 * The VPD fields are fixed-width and not NUL-terminated, so the buffer
 * is terminated manually before each print.
 */
1332 static void ipr_log_vpd(struct ipr_vpd *vpd)
1334 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1335 + IPR_SERIAL_NUM_LEN];
1337 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1338 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1340 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1341 ipr_err("Vendor/Product ID: %s\n", buffer);
/* Reuse the same buffer for the serial number. */
1343 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1344 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1345 ipr_err("    Serial Number: %s\n", buffer);
1349 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1350 * @prefix: string to print at start of printk
1351 * @hostrcb: hostrcb pointer
1352 * @vpd: vendor/product id/sn/wwn struct
/* Extended variant: compact VPD line plus the 64-bit WWN. */
1357 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1358 struct ipr_ext_vpd *vpd)
1360 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1361 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1362 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1366 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1367 * @vpd: vendor/product id/sn/wwn struct
/* Extended variant of ipr_log_vpd(): adds the 64-bit WWN line. */
1372 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1374 ipr_log_vpd(&vpd->vpd);
1375 ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1376 be32_to_cpu(vpd->wwid[1]));
1380 * ipr_log_enhanced_cache_error - Log a cache error.
1381 * @ioa_cfg: ioa config struct
1382 * @hostrcb: hostrcb struct
/*
 * Logs an overlay-id-12 (enhanced cache) error: current vs. expected
 * cache-directory/adapter pairing, using the extended (WWN-bearing)
 * VPD format.  The error overlay lives at different offsets for sis64
 * and legacy hostrcbs.
 */
1387 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1388 struct ipr_hostrcb *hostrcb)
1390 struct ipr_hostrcb_type_12_error *error;
1393 error = &hostrcb->hcam.u.error64.u.type_12_error;
1395 error = &hostrcb->hcam.u.error.u.type_12_error;
1397 ipr_err("-----Current Configuration-----\n");
1398 ipr_err("Cache Directory Card Information:\n");
1399 ipr_log_ext_vpd(&error->ioa_vpd);
1400 ipr_err("Adapter Card Information:\n");
1401 ipr_log_ext_vpd(&error->cfc_vpd);
1403 ipr_err("-----Expected Configuration-----\n");
1404 ipr_err("Cache Directory Card Information:\n");
1405 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1406 ipr_err("Adapter Card Information:\n");
1407 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1409 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1410 be32_to_cpu(error->ioa_data[0]),
1411 be32_to_cpu(error->ioa_data[1]),
1412 be32_to_cpu(error->ioa_data[2]));
1416 * ipr_log_cache_error - Log a cache error.
1417 * @ioa_cfg: ioa config struct
1418 * @hostrcb: hostrcb struct
/*
 * Legacy overlay-id-2 cache error: same current/expected layout as
 * the enhanced variant but with plain (no-WWN) VPD.
 */
1423 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1424 struct ipr_hostrcb *hostrcb)
1426 struct ipr_hostrcb_type_02_error *error =
1427 &hostrcb->hcam.u.error.u.type_02_error;
1429 ipr_err("-----Current Configuration-----\n");
1430 ipr_err("Cache Directory Card Information:\n");
1431 ipr_log_vpd(&error->ioa_vpd);
1432 ipr_err("Adapter Card Information:\n");
1433 ipr_log_vpd(&error->cfc_vpd);
1435 ipr_err("-----Expected Configuration-----\n");
1436 ipr_err("Cache Directory Card Information:\n");
1437 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1438 ipr_err("Adapter Card Information:\n");
1439 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1441 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1442 be32_to_cpu(error->ioa_data[0]),
1443 be32_to_cpu(error->ioa_data[1]),
1444 be32_to_cpu(error->ioa_data[2]));
1448 * ipr_log_enhanced_config_error - Log a configuration error.
1449 * @ioa_cfg: ioa config struct
1450 * @hostrcb: hostrcb struct
/*
 * Overlay-id-13 configuration error: one enhanced device-data entry is
 * logged per reported device error (old vs. new VPD plus the cache/
 * adapter cards the device was last attached to).
 */
1455 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1456 struct ipr_hostrcb *hostrcb)
1458 int errors_logged, i;
1459 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1460 struct ipr_hostrcb_type_13_error *error;
1462 error = &hostrcb->hcam.u.error.u.type_13_error;
1463 errors_logged = be32_to_cpu(error->errors_logged);
1465 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1466 be32_to_cpu(error->errors_detected), errors_logged);
1468 dev_entry = error->dev;
1470 for (i = 0; i < errors_logged; i++, dev_entry++) {
1473 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1474 ipr_log_ext_vpd(&dev_entry->vpd);
1476 ipr_err("-----New Device Information-----\n");
1477 ipr_log_ext_vpd(&dev_entry->new_vpd);
1479 ipr_err("Cache Directory Card Information:\n");
1480 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1482 ipr_err("Adapter Card Information:\n");
1483 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1488 * ipr_log_sis64_config_error - Log a device error.
1489 * @ioa_cfg: ioa config struct
1490 * @hostrcb: hostrcb struct
/*
 * sis64 overlay-id-23 configuration error: identical structure to the
 * enhanced variant, but devices are identified by resource path rather
 * than by physical resource address.
 */
1495 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1496 struct ipr_hostrcb *hostrcb)
1498 int errors_logged, i;
1499 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1500 struct ipr_hostrcb_type_23_error *error;
1501 char buffer[IPR_MAX_RES_PATH_LENGTH];
1503 error = &hostrcb->hcam.u.error64.u.type_23_error;
1504 errors_logged = be32_to_cpu(error->errors_logged);
1506 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1507 be32_to_cpu(error->errors_detected), errors_logged);
1509 dev_entry = error->dev;
1511 for (i = 0; i < errors_logged; i++, dev_entry++) {
1514 ipr_err("Device %d : %s", i + 1,
1515 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1516 ipr_log_ext_vpd(&dev_entry->vpd);
1518 ipr_err("-----New Device Information-----\n");
1519 ipr_log_ext_vpd(&dev_entry->new_vpd);
1521 ipr_err("Cache Directory Card Information:\n");
1522 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1524 ipr_err("Adapter Card Information:\n");
1525 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1530 * ipr_log_config_error - Log a configuration error.
1531 * @ioa_cfg: ioa config struct
1532 * @hostrcb: hostrcb struct
/*
 * Legacy overlay-id-3 configuration error: plain VPD per device entry,
 * followed by five raw IOA data words.
 */
1537 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1538 struct ipr_hostrcb *hostrcb)
1540 int errors_logged, i;
1541 struct ipr_hostrcb_device_data_entry *dev_entry;
1542 struct ipr_hostrcb_type_03_error *error;
1544 error = &hostrcb->hcam.u.error.u.type_03_error;
1545 errors_logged = be32_to_cpu(error->errors_logged);
1547 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1548 be32_to_cpu(error->errors_detected), errors_logged);
1550 dev_entry = error->dev;
1552 for (i = 0; i < errors_logged; i++, dev_entry++) {
1555 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1556 ipr_log_vpd(&dev_entry->vpd);
1558 ipr_err("-----New Device Information-----\n");
1559 ipr_log_vpd(&dev_entry->new_vpd);
1561 ipr_err("Cache Directory Card Information:\n");
1562 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1564 ipr_err("Adapter Card Information:\n");
1565 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1567 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1568 be32_to_cpu(dev_entry->ioa_data[0]),
1569 be32_to_cpu(dev_entry->ioa_data[1]),
1570 be32_to_cpu(dev_entry->ioa_data[2]),
1571 be32_to_cpu(dev_entry->ioa_data[3]),
1572 be32_to_cpu(dev_entry->ioa_data[4]));
1577 * ipr_log_enhanced_array_error - Log an array configuration error.
1578 * @ioa_cfg: ioa config struct
1579 * @hostrcb: hostrcb struct
/*
 * Overlay-id-14/16 array error: logs the RAID level and last functional
 * vset address, then each populated array member (entries whose serial
 * number is all-'0' are skipped as unused slots).
 */
1584 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1585 struct ipr_hostrcb *hostrcb)
1588 struct ipr_hostrcb_type_14_error *error;
1589 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1590 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1592 error = &hostrcb->hcam.u.error.u.type_14_error;
1596 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1597 error->protection_level,
1598 ioa_cfg->host->host_no,
1599 error->last_func_vset_res_addr.bus,
1600 error->last_func_vset_res_addr.target,
1601 error->last_func_vset_res_addr.lun);
1605 array_entry = error->array_member;
/* NOTE(review): this clamps an entry *count* against the *byte size*
 * of the array_member array — looks like it should be the element
 * count (ARRAY_SIZE).  Harmless only if sizeof >= max count; verify. */
1606 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1607 sizeof(error->array_member));
1609 for (i = 0; i < num_entries; i++, array_entry++) {
/* All-'0' serial number marks an unused slot. */
1610 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1613 if (be32_to_cpu(error->exposed_mode_adn) == i)
1614 ipr_err("Exposed Array Member %d:\n", i);
1616 ipr_err("Array Member %d:\n", i);
1618 ipr_log_ext_vpd(&array_entry->vpd);
1619 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1620 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1621 "Expected Location");
1628 * ipr_log_array_error - Log an array configuration error.
1629 * @ioa_cfg: ioa config struct
1630 * @hostrcb: hostrcb struct
/*
 * Legacy overlay-id-4/6 array error.  Iterates a fixed 18 member
 * slots (the hostrcb carries two member arrays; iteration switches to
 * array_member2 partway through — the switch-over condition is missing
 * from this copy of the file).
 */
1635 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1636 struct ipr_hostrcb *hostrcb)
1639 struct ipr_hostrcb_type_04_error *error;
1640 struct ipr_hostrcb_array_data_entry *array_entry;
1641 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1643 error = &hostrcb->hcam.u.error.u.type_04_error;
1647 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1648 error->protection_level,
1649 ioa_cfg->host->host_no,
1650 error->last_func_vset_res_addr.bus,
1651 error->last_func_vset_res_addr.target,
1652 error->last_func_vset_res_addr.lun);
1656 array_entry = error->array_member;
1658 for (i = 0; i < 18; i++) {
/* All-'0' serial number marks an unused slot. */
1659 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1662 if (be32_to_cpu(error->exposed_mode_adn) == i)
1663 ipr_err("Exposed Array Member %d:\n", i);
1665 ipr_err("Array Member %d:\n", i);
1667 ipr_log_vpd(&array_entry->vpd);
1669 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1670 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1671 "Expected Location");
/* Continue into the second member array. */
1676 array_entry = error->array_member2;
1683 * ipr_log_hex_data - Log additional hex IOA error data.
1684 * @ioa_cfg: ioa config struct
1685 * @data: IOA error data
/*
 * Dumps @len bytes of error data as big-endian 32-bit words, four per
 * line with a byte-offset prefix.  At the default log level the dump
 * is capped at IPR_DEFAULT_MAX_ERROR_DUMP bytes.
 */
1691 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1698 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1699 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP)
1701 for (i = 0; i < len / 4; i += 4) {
1702 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1703 be32_to_cpu(data[i]),
1704 be32_to_cpu(data[i+1]),
1705 be32_to_cpu(data[i+2]),
1706 be32_to_cpu(data[i+3]));
1711 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1712 * @ioa_cfg: ioa config struct
1713 * @hostrcb: hostrcb struct
/*
 * Overlay-id-17 dual-adapter error: logs the (trimmed) failure reason
 * with the PRC, the remote IOA's extended VPD, and whatever error data
 * remains in the hostrcb past the type-17 header.
 */
1718 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1719 struct ipr_hostrcb *hostrcb)
1721 struct ipr_hostrcb_type_17_error *error;
1724 error = &hostrcb->hcam.u.error64.u.type_17_error;
1726 error = &hostrcb->hcam.u.error.u.type_17_error;
/* Force-terminate and trim: the adapter does not NUL-terminate. */
1728 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1729 strim(error->failure_reason);
1731 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1732 be32_to_cpu(hostrcb->hcam.u.error.prc));
1733 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
/* Remaining length = total hcam length minus headers up to ->data. */
1734 ipr_log_hex_data(ioa_cfg, error->data,
1735 be32_to_cpu(hostrcb->hcam.length) -
1736 (offsetof(struct ipr_hostrcb_error, u) +
1737 offsetof(struct ipr_hostrcb_type_17_error, data)));
1741 * ipr_log_dual_ioa_error - Log a dual adapter error.
1742 * @ioa_cfg: ioa config struct
1743 * @hostrcb: hostrcb struct
/*
 * Legacy overlay-id-7 dual-adapter error: same shape as the enhanced
 * variant but with plain VPD and the type-07 layout.
 */
1748 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1749 struct ipr_hostrcb *hostrcb)
1751 struct ipr_hostrcb_type_07_error *error;
1753 error = &hostrcb->hcam.u.error.u.type_07_error;
/* Force-terminate and trim: the adapter does not NUL-terminate. */
1754 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1755 strim(error->failure_reason);
1757 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1758 be32_to_cpu(hostrcb->hcam.u.error.prc));
1759 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1760 ipr_log_hex_data(ioa_cfg, error->data,
1761 be32_to_cpu(hostrcb->hcam.length) -
1762 (offsetof(struct ipr_hostrcb_error, u) +
1763 offsetof(struct ipr_hostrcb_type_07_error, data)));
/* Lookup tables mapping SAS fabric path-state nibbles to human-readable
 * strings for the fabric-path loggers below. */
1766 static const struct {
1769 } path_active_desc[] = {
1770 { IPR_PATH_NO_INFO, "Path" },
1771 { IPR_PATH_ACTIVE, "Active path" },
1772 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1775 static const struct {
1778 } path_state_desc[] = {
1779 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1780 { IPR_PATH_HEALTHY, "is healthy" },
1781 { IPR_PATH_DEGRADED, "is degraded" },
1782 { IPR_PATH_FAILED, "is failed" }
1786 * ipr_log_fabric_path - Log a fabric path error
1787 * @hostrcb: hostrcb struct
1788 * @fabric: fabric descriptor
/*
 * Matches the descriptor's active/state nibbles against the lookup
 * tables and prints port/cascade/phy as applicable (0xff fields mean
 * "not present").  Falls through to a raw one-line dump when the
 * state is not in the tables.
 */
1793 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1794 struct ipr_hostrcb_fabric_desc *fabric)
1797 u8 path_state = fabric->path_state;
1798 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1799 u8 state = path_state & IPR_PATH_STATE_MASK;
1801 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1802 if (path_active_desc[i].active != active)
1805 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1806 if (path_state_desc[j].state != state)
/* 0xff in cascaded_expander/phy means that element is absent. */
1809 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1810 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1811 path_active_desc[i].desc, path_state_desc[j].desc,
1813 } else if (fabric->cascaded_expander == 0xff) {
1814 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1815 path_active_desc[i].desc, path_state_desc[j].desc,
1816 fabric->ioa_port, fabric->phy);
1817 } else if (fabric->phy == 0xff) {
1818 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1819 path_active_desc[i].desc, path_state_desc[j].desc,
1820 fabric->ioa_port, fabric->cascaded_expander);
1822 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1823 path_active_desc[i].desc, path_state_desc[j].desc,
1824 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
/* Unrecognized state: raw fallback dump. */
1830 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1831 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1835 * ipr_log64_fabric_path - Log a fabric path error
1836 * @hostrcb: hostrcb struct
1837 * @fabric: fabric descriptor
/*
 * sis64 counterpart of ipr_log_fabric_path(): identifies the path by
 * its formatted resource path instead of port/cascade/phy numbers.
 */
1842 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1843 struct ipr_hostrcb64_fabric_desc *fabric)
1846 u8 path_state = fabric->path_state;
1847 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1848 u8 state = path_state & IPR_PATH_STATE_MASK;
1849 char buffer[IPR_MAX_RES_PATH_LENGTH];
1851 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1852 if (path_active_desc[i].active != active)
1855 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1856 if (path_state_desc[j].state != state)
1859 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1860 path_active_desc[i].desc, path_state_desc[j].desc,
1861 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
/* Unrecognized state: raw fallback dump. */
1866 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1867 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
/* Lookup tables for fabric path *elements*: element type, element
 * status, and the SAS link-rate names indexed by the masked link_rate
 * field (the full table is truncated in this copy of the file). */
1870 static const struct {
1873 } path_type_desc[] = {
1874 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1875 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1876 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1877 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1880 static const struct {
1883 } path_status_desc[] = {
1884 { IPR_PATH_CFG_NO_PROB, "Functional" },
1885 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1886 { IPR_PATH_CFG_FAILED, "Failed" },
1887 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1888 { IPR_PATH_NOT_DETECTED, "Missing" },
1889 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1892 static const char *link_rate[] = {
1895 "phy reset problem",
1912 * ipr_log_path_elem - Log a fabric path element.
1913 * @hostrcb: hostrcb struct
1914 * @cfg: fabric path element struct
/*
 * Logs one fabric path element: status + type, then whichever of
 * phy/cascade are present (0xff = absent), the link rate, and the WWN.
 * Nonexistent elements are skipped; unrecognized type/status falls
 * through to a raw dump.
 */
1919 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1920 struct ipr_hostrcb_config_element *cfg)
1923 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1924 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1926 if (type == IPR_PATH_CFG_NOT_EXIST)
1929 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1930 if (path_type_desc[i].type != type)
1933 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1934 if (path_status_desc[j].status != status)
1937 if (type == IPR_PATH_CFG_IOA_PORT) {
1938 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1939 path_status_desc[j].desc, path_type_desc[i].desc,
1940 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1941 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/* Non-IOA-port elements: print only the locators that exist. */
1943 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1944 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1945 path_status_desc[j].desc, path_type_desc[i].desc,
1946 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1947 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1948 } else if (cfg->cascaded_expander == 0xff) {
1949 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1950 "WWN=%08X%08X\n", path_status_desc[j].desc,
1951 path_type_desc[i].desc, cfg->phy,
1952 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1953 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1954 } else if (cfg->phy == 0xff) {
1955 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1956 "WWN=%08X%08X\n", path_status_desc[j].desc,
1957 path_type_desc[i].desc, cfg->cascaded_expander,
1958 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1959 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1961 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1962 "WWN=%08X%08X\n", path_status_desc[j].desc,
1963 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1964 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1965 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/* Unrecognized type/status: raw fallback dump. */
1972 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1973 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1974 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1975 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1979 * ipr_log64_path_elem - Log a fabric path element.
1980 * @hostrcb: hostrcb struct
1981 * @cfg: fabric path element struct
/*
 * sis64 counterpart of ipr_log_path_elem(): only processes elements
 * carrying the SIS64 descriptor id and identifies them by formatted
 * resource path rather than cascade/phy numbers.
 */
1986 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
1987 struct ipr_hostrcb64_config_element *cfg)
1990 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
1991 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1992 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1993 char buffer[IPR_MAX_RES_PATH_LENGTH];
1995 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
1998 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1999 if (path_type_desc[i].type != type)
2002 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2003 if (path_status_desc[j].status != status)
2006 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2007 path_status_desc[j].desc, path_type_desc[i].desc,
2008 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2009 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2010 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/* Unrecognized type/status: raw fallback dump. */
2014 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2015 "WWN=%08X%08X\n", cfg->type_status,
2016 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2017 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2018 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2022 * ipr_log_fabric_error - Log a fabric error.
2023 * @ioa_cfg: ioa config struct
2024 * @hostrcb: hostrcb struct
/*
 * Overlay-id-20 fabric error: prints the failure reason, then walks
 * the variable-length fabric descriptors (each followed by its config
 * elements), and finally hex-dumps whatever trails the descriptors.
 */
2029 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2030 struct ipr_hostrcb *hostrcb)
2032 struct ipr_hostrcb_type_20_error *error;
2033 struct ipr_hostrcb_fabric_desc *fabric;
2034 struct ipr_hostrcb_config_element *cfg;
2037 error = &hostrcb->hcam.u.error.u.type_20_error;
2038 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2039 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
/* Bytes remaining after the fixed part of the type-20 overlay. */
2041 add_len = be32_to_cpu(hostrcb->hcam.length) -
2042 (offsetof(struct ipr_hostrcb_error, u) +
2043 offsetof(struct ipr_hostrcb_type_20_error, desc));
2045 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2046 ipr_log_fabric_path(hostrcb, fabric);
2047 for_each_fabric_cfg(fabric, cfg)
2048 ipr_log_path_elem(hostrcb, cfg);
/* Advance by the descriptor's self-declared length. */
2050 add_len -= be16_to_cpu(fabric->length);
2051 fabric = (struct ipr_hostrcb_fabric_desc *)
2052 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2055 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2059 * ipr_log_sis64_array_error - Log a sis64 array error.
2060 * @ioa_cfg: ioa config struct
2061 * @hostrcb: hostrcb struct
/*
 * sis64 overlay-id-24 array error: like ipr_log_enhanced_array_error()
 * but members are located by resource path.
 */
2066 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2067 struct ipr_hostrcb *hostrcb)
2070 struct ipr_hostrcb_type_24_error *error;
2071 struct ipr_hostrcb64_array_data_entry *array_entry;
2072 char buffer[IPR_MAX_RES_PATH_LENGTH];
2073 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2075 error = &hostrcb->hcam.u.error64.u.type_24_error;
2079 ipr_err("RAID %s Array Configuration: %s\n",
2080 error->protection_level,
2081 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2085 array_entry = error->array_member;
/* NOTE(review): clamps an entry *count* against the *byte size* of
 * array_member — likely should be the element count; verify. */
2086 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2087 sizeof(error->array_member));
2089 for (i = 0; i < num_entries; i++, array_entry++) {
/* All-'0' serial number marks an unused slot. */
2091 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2094 if (error->exposed_mode_adn == i)
2095 ipr_err("Exposed Array Member %d:\n", i);
2097 ipr_err("Array Member %d:\n", i);
/* NOTE(review): "Array Member" is printed twice in a row here (orig
 * lines 2097 and 2099) — looks like a duplicated line; confirm
 * against the upstream source. */
2099 ipr_err("Array Member %d:\n", i);
2100 ipr_log_ext_vpd(&array_entry->vpd);
2101 ipr_err("Current Location: %s",
2102 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2103 ipr_err("Expected Location: %s",
2104 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2111 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2112 * @ioa_cfg: ioa config struct
2113 * @hostrcb: hostrcb struct
/*
 * sis64 overlay-id-30 fabric error: identical walk to
 * ipr_log_fabric_error() but using the 64-bit descriptor/element
 * layouts and loggers.
 */
2118 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2119 struct ipr_hostrcb *hostrcb)
2121 struct ipr_hostrcb_type_30_error *error;
2122 struct ipr_hostrcb64_fabric_desc *fabric;
2123 struct ipr_hostrcb64_config_element *cfg;
2126 error = &hostrcb->hcam.u.error64.u.type_30_error;
2128 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2129 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
/* Bytes remaining after the fixed part of the type-30 overlay. */
2131 add_len = be32_to_cpu(hostrcb->hcam.length) -
2132 (offsetof(struct ipr_hostrcb64_error, u) +
2133 offsetof(struct ipr_hostrcb_type_30_error, desc));
2135 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2136 ipr_log64_fabric_path(hostrcb, fabric);
2137 for_each_fabric_cfg(fabric, cfg)
2138 ipr_log64_path_elem(hostrcb, cfg);
2140 add_len -= be16_to_cpu(fabric->length);
2141 fabric = (struct ipr_hostrcb64_fabric_desc *)
2142 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2145 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2149 * ipr_log_generic_error - Log an adapter error.
2150 * @ioa_cfg: ioa config struct
2151 * @hostrcb: hostrcb struct
/* Fallback logger: hex-dump the entire raw hostrcb payload. */
2156 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2157 struct ipr_hostrcb *hostrcb)
2159 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2160 be32_to_cpu(hostrcb->hcam.length));
2164 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2167 * This function will return the index of into the ipr_error_table
2168 * for the specified IOASC. If the IOASC is not in the table,
2169 * 0 will be returned, which points to the entry used for unknown errors.
2172 * index into the ipr_error_table
/*
 * Linear search over ipr_error_table on the masked IOASC; index 0 is
 * the catch-all "unknown error" entry.
 */
2174 static u32 ipr_get_error(u32 ioasc)
2178 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2179 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
	/* Only error-log notifications are decoded here */
	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	/* NOTE(review): the sis64/legacy if/else selecting which of the next
	 * two fd_ioasc reads executes is elided in this excerpt */
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);

	error_index = ipr_get_error(ioasc);

	/* A zero log_hcam means this IOASC is never logged */
	if (!ipr_error_table[error_index].log_hcam)

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)

	/* Clamp the length so overlay decoders never read past the raw buffer */
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	/* Dispatch to the decoder for this overlay format
	 * (break statements elided in this excerpt) */
	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
		ipr_log_generic_error(ioa_cfg, hostrcb);
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/* NOTE(review): the sis64/legacy if/else selecting which fd_ioasc
	 * read executes is elided in this excerpt */
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	/* Recycle the command; the hostrcb is re-sent below */
	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

		ipr_handle_log_data(ioa_cfg, hostrcb);
		/* The adapter asked for a reset via this error's fd_ioasc */
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);

	/* Re-arm: hand the hostrcb back to the adapter for the next error */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	/* A dump was pending; collect it as part of the reset */
	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	/* Reset unless another reset is already in flight that this
	 * command is not driving */
	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	/* A dump was pending; collect it as part of the reset */
	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		/* Burn extra retries so a wedged bring-up gives up sooner */
		ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Caller is presumed to hold host_lock on entry (it is dropped and
 * re-taken around the wait) — TODO confirm against callers.
 *
 * Return value:
 * 	int (tail of function elided in this excerpt)
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	/* Sleep until the reset/reload state machine completes */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	 the adapter for some reason, and the reset failed. */
	if (ioa_cfg->ioa_is_dead) {
	/* NOTE(review): remainder of function elided in this excerpt */
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;
	/* NOTE(review): declarations of i/j/matches elided in this excerpt */

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			/* 'X' marks a byte that must compare against the
			 * table's product_id (non-'X' handling elided here) */
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])

		/* All significant bytes matched: this is our entry */
		if (matches == IPR_PROD_ID_LEN)
	/* NOTE(review): return statements elided in this excerpt */
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		/* Only SES devices limit the bus speed */
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))

		if (bus != res->bus)

		/* Unknown enclosures impose no additional limit */
		if (!(ste = ipr_find_ses_entry(res)))

		/* Scale the table's limit by the bus width in bytes */
		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);

	return max_xfer_rate;
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
	volatile u32 pcii_reg;
	/* NOTE(review): declaration/initialization of delay elided here */

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
	/* NOTE(review): udelay branch, delay update and return statements
	 * elided in this excerpt */
/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   __be32 *dest, u32 length_in_words)
	/* NOTE(review): the start_addr parameter line, loop variable
	 * declaration, dest increment and return are elided here */
	for (i = 0; i < length_in_words; i++) {
		/* Select the adapter word via the dump address register,
		 * then read it back through the dump data register */
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:				destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      __be32 *dest, u32 length_in_words)
	volatile u32 temp_pcii_reg;
	/* NOTE(review): the start_addr parameter line and some locals
	 * (i, delay) are elided in this excerpt */

	/* sis64 adapters use the register-based dump path instead */
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
			readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
	/* NOTE(review): delay update and return statements elided */
2634 #ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:			length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
	/* NOTE(review): declaration of page (__be32 *) elided in excerpt */

	/* Stop when the requested length or the overall dump cap is hit */
	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		/* Need a fresh page when the current one is full (or first
		 * time through) */
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);
				/* Allocation failed: return what we have */
				return bytes_copied;

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
			/* Continue filling the most recent page */
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

	/* Copy the smaller of what's left vs. room left in this page */
	rem_len = length - bytes_copied;
	rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
	cur_len = min(rem_len, rem_page_len);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Dump was aborted while the lock was dropped; stop copying */
	if (ioa_cfg->sdt_state == ABORT_DUMP) {
		rc = ipr_get_ldump_data_section(ioa_cfg,
						pci_address + bytes_copied,
						&page[ioa_dump->page_offset / 4],
						(cur_len / sizeof(u32)));
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		ioa_dump->page_offset += cur_len;
		bytes_copied += cur_len;

	return bytes_copied;
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	/* Data follows immediately after the header */
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	/* Pack major/card/minor[0]/minor[1] into one 32-bit fw version */
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	/* PCI device name (e.g. "0000:02:01.0") identifies the slot */
	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
	driver_dump->hdr.num_entries++;
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	/* NOTE(review): some locals (e.g. i), early returns, closing braces
	 * and continue/break statements are elided in this excerpt */

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Someone raced us and the dump is no longer wanted */
	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Mailbox holds the adapter-side address of the smart dump table */
	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 lengths to gather the real dump data. sdt represents the pointer
	 to the ioa generated dump table. Dump data will be extracted based
	 on entries in this table */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		/* Overall dump size cap reached: mark partial success */
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
			/* Skip entries claiming more than the dump cap */
			if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
				sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;

			/* Copy data from adapter to driver buffers */
			bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,

			ioa_dump->hdr.len += bytes_copied;

			/* Short copy means we hit a limit or an abort */
			if (bytes_copied != bytes_to_copy) {
				driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;

	ioa_cfg->sdt_state = DUMP_OBTAINED;
2945 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;

	/* Detach the dump from the adapter before freeing its pages */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Free every page ipr_sdt_copy allocated for IOA data */
	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
/**
 * ipr_worker_thread - Worker thread
 * @work:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	/* NOTE(review): several lines (returns, some assignments such as
	 * sdev/bus/lun, closing braces, restart labels) are elided here */

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Dump requested: take a reference so the dump cannot be freed
	 * while we copy it with the lock dropped */
	if (ioa_cfg->sdt_state == GET_DUMP) {
		dump = ioa_cfg->dump;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Mid-layer add/remove is only safe when the adapter allows it */
	if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* First pass: remove devices the adapter reported gone */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->del_from_ml && res->sdev) {
			if (!scsi_device_get(sdev)) {
				list_move_tail(&res->queue, &ioa_cfg->free_res_q);
				/* Drop the lock: scsi_remove_device sleeps */
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
				spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Second pass: surface newly discovered devices */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			target = res->target;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Let udev/userspace know the topology changed */
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3060 #ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	/* Lock out trace writers while copying to userspace's buffer */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* NOTE(review): trace-size argument, ret declaration and return
	 * are elided in this excerpt */
/* sysfs binary attribute exposing the adapter trace buffer via
 * ipr_read_trace (NOTE(review): .attr/.size fields elided in excerpt) */
static struct bin_attribute ipr_trace_attr = {
	.read = ipr_read_trace,
/* Maps write-cache states to the strings accepted/shown by the
 * "write_cache" sysfs attribute (NOTE(review): the name member line
 * is elided in this excerpt) */
static const struct {
	enum ipr_cache_state state;
} cache_state [] = {
	{ CACHE_NONE, "none" },
	{ CACHE_DISABLED, "disabled" },
	{ CACHE_ENABLED, "enabled" }
/**
 * ipr_show_write_caching - Show the write caching attribute
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_write_caching(struct device *dev,
				      struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Translate the current state to its table name */
	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
		if (cache_state[i].state == ioa_cfg->cache_state) {
			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_store_write_caching - Enable/disable adapter write cache
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will enable/disable adapter write cache.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_write_caching(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	enum ipr_cache_state new_state = CACHE_INVALID;

	if (!capable(CAP_SYS_ADMIN))
	/* Adapter has no controllable write cache */
	if (ioa_cfg->cache_state == CACHE_NONE)

	/* Parse the requested state by table-name prefix match */
	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
			new_state = cache_state[i].state;

	/* Only the two user-settable states are accepted */
	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Already in the requested state: nothing to do */
	if (ioa_cfg->cache_state == new_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->cache_state = new_state;
	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
	/* The new setting takes effect via a full adapter reset */
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs "write_cache" attribute: read/write adapter write-cache state */
static struct device_attribute ipr_ioa_cache_attr = {
		.name =		"write_cache",
		.mode =		S_IRUGO | S_IWUSR,
	.show = ipr_show_write_caching,
	.store = ipr_store_write_caching
/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Same 4-byte packing used in the dump's fw_version field */
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "fw_version" attribute (read-only)
 * NOTE(review): .mode line elided in this excerpt */
static struct device_attribute ipr_fw_version_attr = {
		.name =		"fw_version",
	.show = ipr_show_fw_version,
/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev:	class device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* No range check: any unsigned value is accepted as a level */
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "log_level" attribute: read/write error logging verbosity */
static struct device_attribute ipr_log_level_attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	if (!capable(CAP_SYS_ADMIN))

	/* If a reset is already running, wait for it to finish first */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Reset the error counter so we can detect new errors below */
	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* A new reset or any logged error means diagnostics failed */
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "run_diagnostics" attribute (write-only trigger)
 * NOTE(review): .mode line elided in this excerpt */
static struct device_attribute ipr_diagnostics_attr = {
		.name =		"run_diagnostics",
	.store = ipr_store_diagnostics
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;

	if (!capable(CAP_SYS_ADMIN))

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Bringing a dead adapter back requires clearing its failure
	 * bookkeeping and running a full reset */
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3408 static struct device_attribute ipr_ioa_state_attr = {
3410 .name = "online_state",
3411 .mode = S_IRUGO | S_IWUSR,
3413 .show = ipr_show_adapter_state,
3414 .store = ipr_store_adapter_state
3418  * ipr_store_reset_adapter - Reset the adapter
3419  * @dev:	device struct
3421  * @count:	buffer size
3423  * This function will reset the adapter.
3426  *	count on success / other on failure
3428 static ssize_t ipr_store_reset_adapter(struct device *dev,
3429 				       struct device_attribute *attr,
3430 				       const char *buf, size_t count)
3432 	struct Scsi_Host *shost = class_to_shost(dev);
3433 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3434 	unsigned long lock_flags;
	/* NOTE(review): the privilege-failure return path is elided. */
3437 	if (!capable(CAP_SYS_ADMIN))
3440 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Start a reset only if one is not already in flight; either way we
	 * wait below for any in-progress reset/reload to finish. */
3441 	if (!ioa_cfg->in_reset_reload)
3442 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3443 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3444 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs host attribute "reset_host": write-only trigger for a normal-shutdown
 * adapter reset. NOTE(review): .mode initializer elided in this extract. */
3449 static struct device_attribute ipr_ioa_reset_attr = {
3451 		.name = "reset_host",
3454 	.store = ipr_store_reset_adapter
3458  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3459  * @buf_len:		buffer length
3461  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3462  * list to use for microcode download
3465  *	pointer to sglist / NULL on failure
3467 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3469 	int sg_size, order, bsize_elem, num_elem, i, j;
3470 	struct ipr_sglist *sglist;
3471 	struct scatterlist *scatterlist;
	/* Get the minimum size per scatter/gather element: spread buf_len
	 * over at most IPR_MAX_SGLIST entries. */
3474 	/* Get the minimum size per scatter/gather element */
3475 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3477 	/* Get the actual size per element */
3478 	order = get_order(sg_size);
3480 	/* Determine the actual number of bytes per element */
3481 	bsize_elem = PAGE_SIZE * (1 << order);
	/* Round up so a partial trailing chunk still gets an sg entry. */
3483 	/* Determine the actual number of sg entries needed */
3484 	if (buf_len % bsize_elem)
3485 		num_elem = (buf_len / bsize_elem) + 1;
3487 		num_elem = buf_len / bsize_elem;
	/* ipr_sglist has one embedded scatterlist; allocate num_elem - 1 more
	 * (trailing-array idiom of the era; today a flexible array member). */
3489 	/* Allocate a scatter/gather list for the DMA */
3490 	sglist = kzalloc(sizeof(struct ipr_sglist) +
3491 			 (sizeof(struct scatterlist) * (num_elem - 1)),
3494 	if (sglist == NULL) {
3499 	scatterlist = sglist->scatterlist;
3500 	sg_init_table(scatterlist, num_elem);
3502 	sglist->order = order;
3503 	sglist->num_sg = num_elem;
3505 	/* Allocate a bunch of sg elements */
3506 	for (i = 0; i < num_elem; i++) {
3507 		page = alloc_pages(GFP_KERNEL, order);
		/* On allocation failure, unwind all pages allocated so far.
		 * NOTE(review): the failure branch and NULL return are
		 * partially elided in this extract. */
3511 			/* Free up what we already allocated */
3512 			for (j = i - 1; j >= 0; j--)
3513 				__free_pages(sg_page(&scatterlist[j]), order);
3518 		sg_set_page(&scatterlist[i], page, 0, 0);
3525  * ipr_free_ucode_buffer - Frees a microcode download buffer
3526  * @p_dnld:		scatter/gather list pointer
3528  * Free a DMA'able ucode download buffer previously allocated with
3529  * ipr_alloc_ucode_buffer
3534 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
	/* Release every page chunk at the order recorded at allocation time.
	 * NOTE(review): the kfree(sglist) that frees the list itself is
	 * elided in this extract. */
3538 	for (i = 0; i < sglist->num_sg; i++)
3539 		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3545  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3546  * @sglist:		scatter/gather list pointer
3547  * @buffer:		buffer pointer
3548  * @len:		buffer length
3550  * Copy a microcode image from a user buffer into a buffer allocated by
3551  * ipr_alloc_ucode_buffer
3554  * 	0 on success / other on failure
3556 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3557 				 u8 *buffer, u32 len)
3559 	int bsize_elem, i, result = 0;
3560 	struct scatterlist *scatterlist;
3563 	/* Determine the actual number of bytes per element */
3564 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3566 	scatterlist = sglist->scatterlist;
	/* Copy all full-size chunks. NOTE(review): the kmap/kunmap pairs
	 * around each memcpy appear elided in this extract -- 'kaddr' is
	 * assigned on the missing lines. */
3568 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3569 		struct page *page = sg_page(&scatterlist[i]);
3572 		memcpy(kaddr, buffer, bsize_elem);
3575 		scatterlist[i].length = bsize_elem;
	/* Copy the trailing partial chunk, if any. */
3583 	if (len % bsize_elem) {
3584 		struct page *page = sg_page(&scatterlist[i]);
3587 		memcpy(kaddr, buffer, len % bsize_elem);
3590 		scatterlist[i].length = len % bsize_elem;
	/* Record the total image length for the IOADL builders. */
3593 	sglist->buffer_len = len;
3598  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3599  * @ipr_cmd:	ipr command struct
3600  * @sglist:		scatter/gather list
3602  * Builds a microcode download IOA data list (IOADL).
3605 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3606 				    struct ipr_sglist *sglist)
3608 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3609 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3610 	struct scatterlist *scatterlist = sglist->scatterlist;
	/* This is a write to the adapter: mark direction and total length. */
3613 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3614 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3615 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
	/* NOTE(review): the lvalue for this length assignment (the ioadl_len
	 * field) is on a line elided from this extract. */
3618 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	/* One 64-bit descriptor per DMA-mapped sg entry; all fields are
	 * big-endian as seen by the IOA. */
3619 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3620 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3621 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3622 		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	/* Flag the final descriptor so the IOA knows where the list ends. */
3625 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3629  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3630  * @ipr_cmd:	ipr command struct
3631  * @sglist:		scatter/gather list
3633  * Builds a microcode download IOA data list (IOADL).
3636 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3637 				  struct ipr_sglist *sglist)
3639 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3640 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3641 	struct scatterlist *scatterlist = sglist->scatterlist;
	/* 32-bit variant of ipr_build_ucode_ioadl64 for non-SIS64 adapters. */
3644 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3645 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3646 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
	/* NOTE(review): the lvalue for this length assignment is on a line
	 * elided from this extract. */
3649 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	/* Each descriptor packs flags and length into one big-endian word;
	 * the address assignment's lvalue line is elided here. */
3651 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3652 		ioadl[i].flags_and_data_len =
3653 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3655 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	/* Mark the last descriptor in the list. */
3658 	ioadl[i-1].flags_and_data_len |=
3659 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3663  * ipr_update_ioa_ucode - Update IOA's microcode
3664  * @ioa_cfg:	ioa config struct
3665  * @sglist:		scatter/gather list
3667  * Initiate an adapter reset to update the IOA's microcode
3670  * 	0 on success / -EIO on failure
3672 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3673 				struct ipr_sglist *sglist)
3675 	unsigned long lock_flags;
	/* Wait out any in-flight reset: drop the lock while sleeping, then
	 * re-check under the lock (classic sleep-then-recheck loop). */
3677 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3678 	while(ioa_cfg->in_reset_reload) {
3679 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3680 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3681 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Only one download may be staged at a time; ucode_sglist acts as the
	 * in-progress marker. NOTE(review): the -EIO return is elided. */
3684 	if (ioa_cfg->ucode_sglist) {
3685 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3686 		dev_err(&ioa_cfg->pdev->dev,
3687 			"Microcode download already in progress\n");
	/* Map the chunked image for DMA to the adapter. NOTE(review): this
	 * maps under the spinlock -- legacy pattern; modern code would use
	 * dma_map_sg outside atomic context where possible. */
3691 	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3692 					sglist->num_sg, DMA_TO_DEVICE);
3694 	if (!sglist->num_dma_sg) {
3695 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3696 		dev_err(&ioa_cfg->pdev->dev,
3697 			"Failed to map microcode download buffer!\n");
	/* Stage the image and reset the adapter; the reset job performs the
	 * actual download, so wait for it to complete. */
3701 	ioa_cfg->ucode_sglist = sglist;
3702 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3703 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3704 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	/* Clear the in-progress marker under the lock. */
3706 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3707 	ioa_cfg->ucode_sglist = NULL;
3708 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3713  * ipr_store_update_fw - Update the firmware on the adapter
3714  * @class_dev:	device struct
3716  * @count:	buffer size
3718  * This function will update the firmware on the adapter.
3721  * 	count on success / other on failure
3723 static ssize_t ipr_store_update_fw(struct device *dev,
3724 				   struct device_attribute *attr,
3725 				   const char *buf, size_t count)
3727 	struct Scsi_Host *shost = class_to_shost(dev);
3728 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3729 	struct ipr_ucode_image_header *image_hdr;
3730 	const struct firmware *fw_entry;
3731 	struct ipr_sglist *sglist;
3734 	int len, result, dnld_size;
3736 	if (!capable(CAP_SYS_ADMIN))
	/* Copy the filename out of the sysfs buffer and strip the trailing
	 * newline. NOTE(review): fname[len-1]='\0' assumes a newline is
	 * present; an exact-length name would lose its last char -- confirm
	 * against callers. 'fname' declaration is elided in this extract. */
3739 	len = snprintf(fname, 99, "%s", buf);
3740 	fname[len-1] = '\0';
3742 	if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3743 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
	/* Validate the image: header must fit, and if the VPD carries a card
	 * type it must match the image's card type. */
3747 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3749 	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3750 	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3751 	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3752 		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3753 		release_firmware(fw_entry);
	/* The download payload starts right after the image header. */
3757 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3758 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3759 	sglist = ipr_alloc_ucode_buffer(dnld_size);
3762 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3763 		release_firmware(fw_entry);
	/* Stage the image into the DMA buffer, then trigger the reset-driven
	 * download; cleanup below runs on both success and failure paths. */
3767 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3770 		dev_err(&ioa_cfg->pdev->dev,
3771 			"Microcode buffer copy to DMA buffer failed\n");
3775 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3780 	ipr_free_ucode_buffer(sglist);
3781 	release_firmware(fw_entry);
/* sysfs host attribute "update_fw": write a firmware filename to flash the
 * adapter. NOTE(review): .mode initializer elided in this extract. */
3785 static struct device_attribute ipr_update_fw_attr = {
3787 		.name =          "update_fw",
3790 	.store = ipr_store_update_fw
/* Host-level sysfs attributes exported via the scsi_host_template. The
 * fw_version, log_level and ioa_cache entries are defined earlier in the
 * file (outside this extract). NULL terminator elided in this extract. */
3793 static struct device_attribute *ipr_ioa_attrs[] = {
3794 	&ipr_fw_version_attr,
3795 	&ipr_log_level_attr,
3796 	&ipr_diagnostics_attr,
3797 	&ipr_ioa_state_attr,
3798 	&ipr_ioa_reset_attr,
3799 	&ipr_update_fw_attr,
3800 	&ipr_ioa_cache_attr,
3804 #ifdef CONFIG_SCSI_IPR_DUMP
3806  * ipr_read_dump - Dump the adapter
3807  * @kobj:		kobject struct
3808  * @bin_attr:		bin_attribute struct
3811  * @count:		buffer size
3814  *	number of bytes printed to buffer
3816 static ssize_t ipr_read_dump(struct kobject *kobj,
3817 			     struct bin_attribute *bin_attr,
3818 			     char *buf, loff_t off, size_t count)
3820 	struct device *cdev = container_of(kobj, struct device, kobj);
3821 	struct Scsi_Host *shost = class_to_shost(cdev);
3822 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3823 	struct ipr_dump *dump;
3824 	unsigned long lock_flags = 0;
3829 	if (!capable(CAP_SYS_ADMIN))
	/* Take a reference on the dump under the lock so it cannot be freed
	 * while we copy from it; bail if no completed dump exists. */
3832 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3833 	dump = ioa_cfg->dump;
3835 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3836 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3839 	kref_get(&dump->kref);
3840 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Clamp the request to the dump's recorded length. */
3842 	if (off > dump->driver_dump.hdr.len) {
3843 		kref_put(&dump->kref, ipr_release_dump);
3847 	if (off + count > dump->driver_dump.hdr.len) {
3848 		count = dump->driver_dump.hdr.len - off;
	/* The dump image is laid out as three consecutive regions: the driver
	 * dump header, the ioa_dump header, then the page array of IOA data.
	 * Each section below copies its slice and rebases 'off'.
	 * NOTE(review): updates to buf/count/len between sections are on
	 * elided lines. */
3852 	if (count && off < sizeof(dump->driver_dump)) {
3853 		if (off + count > sizeof(dump->driver_dump))
3854 			len = sizeof(dump->driver_dump) - off;
3857 		src = (u8 *)&dump->driver_dump + off;
3858 		memcpy(buf, src, len);
3864 	off -= sizeof(dump->driver_dump);
3866 	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3867 		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3868 			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3871 		src = (u8 *)&dump->ioa_dump + off;
3872 		memcpy(buf, src, len);
3878 	off -= offsetof(struct ipr_ioa_dump, ioa_data);
	/* Copy page-by-page from the ioa_data page array, never crossing a
	 * page boundary in a single memcpy. */
3881 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3882 			len = PAGE_ALIGN(off) - off;
3885 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3886 		src += off & ~PAGE_MASK;
3887 		memcpy(buf, src, len);
	/* Drop our reference; last put frees the dump via ipr_release_dump. */
3893 	kref_put(&dump->kref, ipr_release_dump);
3898  * ipr_alloc_dump - Prepare for adapter dump
3899  * @ioa_cfg:	ioa config struct
3902  *	0 on success / other on failure
3904 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3906 	struct ipr_dump *dump;
3907 	unsigned long lock_flags = 0;
3909 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3912 		ipr_err("Dump memory allocation failed\n");
	/* Refcounted so readers (ipr_read_dump) can pin it while copying. */
3916 	kref_init(&dump->kref);
3917 	dump->ioa_cfg = ioa_cfg;
3919 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* A dump is already pending/obtained -- don't clobber it.
	 * NOTE(review): the kfree of the new dump on this path is elided. */
3921 	if (INACTIVE != ioa_cfg->sdt_state) {
3922 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Arm dump collection; if the adapter is already dead, kick the
	 * worker immediately to take the dump (once only). */
3927 	ioa_cfg->dump = dump;
3928 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3929 	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3930 		ioa_cfg->dump_taken = 1;
3931 		schedule_work(&ioa_cfg->work_q);
3933 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3939  * ipr_free_dump - Free adapter dump memory
3940  * @ioa_cfg:	ioa config struct
3943  *	0 on success / other on failure
3945 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3947 	struct ipr_dump *dump;
3948 	unsigned long lock_flags = 0;
	/* Detach the dump from the config under the lock, then drop the
	 * config's reference outside the lock; the final kref_put frees it
	 * via ipr_release_dump. NOTE(review): the early-return when no dump
	 * exists is partially elided. */
3952 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3953 	dump = ioa_cfg->dump;
3955 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3959 	ioa_cfg->dump = NULL;
3960 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3962 	kref_put(&dump->kref, ipr_release_dump);
3969  * ipr_write_dump - Setup dump state of adapter
3970  * @kobj:		kobject struct
3971  * @bin_attr:		bin_attribute struct
3974  * @count:		buffer size
3977  *	number of bytes printed to buffer
3979 static ssize_t ipr_write_dump(struct kobject *kobj,
3980 			      struct bin_attribute *bin_attr,
3981 			      char *buf, loff_t off, size_t count)
3983 	struct device *cdev = container_of(kobj, struct device, kobj);
3984 	struct Scsi_Host *shost = class_to_shost(cdev);
3985 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3988 	if (!capable(CAP_SYS_ADMIN))
	/* Writing '1' arms dump collection, '0' frees a collected dump.
	 * NOTE(review): the buf[0]=='1' condition line and the final return
	 * are elided in this extract. */
3992 		rc = ipr_alloc_dump(ioa_cfg);
3993 	else if (buf[0] == '0')
3994 		rc = ipr_free_dump(ioa_cfg);
/* Binary sysfs attribute "dump" (name line elided): root-only read of the
 * adapter dump image, write to arm/free it. */
4004 static struct bin_attribute ipr_dump_attr = {
4007 		.mode = S_IRUSR | S_IWUSR,
4010 	.read = ipr_read_dump,
4011 	.write = ipr_write_dump
/* Stub for builds without CONFIG_SCSI_IPR_DUMP: dump support is compiled
 * out, so freeing a dump is a successful no-op.
 * Fix: dropped the stray ';' after the function body -- an empty file-scope
 * declaration is not valid ISO C and trips -Wpedantic. */
4014 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4018  * ipr_change_queue_depth - Change the device's queue depth
4019  * @sdev:	scsi device struct
4020  * @qdepth:	depth to set
4021  * @reason:	calling context
4026 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4029 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4030 	struct ipr_resource_entry *res;
4031 	unsigned long lock_flags = 0;
	/* Only honor default (user-initiated) depth changes.
	 * NOTE(review): the -EOPNOTSUPP return for other reasons is elided. */
4033 	if (reason != SCSI_QDEPTH_DEFAULT)
	/* SATA devices are capped at the per-ATA-LUN command limit. */
4036 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4037 	res = (struct ipr_resource_entry *)sdev->hostdata;
4039 	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4040 		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4041 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4043 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4044 	return sdev->queue_depth;
4048  * ipr_change_queue_type - Change the device's queue type
4049  * @dsev:		scsi device struct
4050  * @tag_type:	type of tags to use
4053  *	actual queue type set
4055 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4057 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4058 	struct ipr_resource_entry *res;
4059 	unsigned long lock_flags = 0;
4061 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4062 	res = (struct ipr_resource_entry *)sdev->hostdata;
	/* Tagged queuing only applies to generic SCSI devices that support
	 * tags; otherwise tag_type is forced off (on elided lines). */
4065 		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4067 			 * We don't bother quiescing the device here since the
4068 			 * adapter firmware does it for us.
4070 			scsi_set_tag_type(sdev, tag_type);
	/* Enable or disable TCQ to match the requested tag type. */
4073 				scsi_activate_tcq(sdev, sdev->queue_depth);
4075 				scsi_deactivate_tcq(sdev, sdev->queue_depth);
4081 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4086  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4087  * @dev:	device struct
4091  *	number of bytes printed to buffer
4093 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4095 	struct scsi_device *sdev = to_scsi_device(dev);
4096 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4097 	struct ipr_resource_entry *res;
4098 	unsigned long lock_flags = 0;
4099 	ssize_t len = -ENXIO;
	/* Default -ENXIO is returned if the device has no resource entry.
	 * NOTE(review): res_handle is printed raw with %08X; on big-endian
	 * vs little-endian hosts byte order of the handle may differ --
	 * confirm intended presentation. */
4101 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4102 	res = (struct ipr_resource_entry *)sdev->hostdata;
4104 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4105 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Per-device sysfs attribute "adapter_handle" (read-only; mode line elided). */
4109 static struct device_attribute ipr_adapter_handle_attr = {
4111 		.name = 	"adapter_handle",
4114 	.show = ipr_show_adapter_handle
4118  * ipr_show_resource_path - Show the resource path for this device.
4119  * @dev:	device struct
4123  *	number of bytes printed to buffer
4125 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4127 	struct scsi_device *sdev = to_scsi_device(dev);
4128 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4129 	struct ipr_resource_entry *res;
4130 	unsigned long lock_flags = 0;
4131 	ssize_t len = -ENXIO;
4132 	char buffer[IPR_MAX_RES_PATH_LENGTH];
	/* Format the SIS64 resource path into a local buffer under the lock;
	 * -ENXIO if the device has no resource entry. */
4134 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4135 	res = (struct ipr_resource_entry *)sdev->hostdata;
4137 		len = snprintf(buf, PAGE_SIZE, "%s\n",
4138 			       ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4139 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Per-device sysfs attribute "resource_path" (read-only; mode line elided). */
4143 static struct device_attribute ipr_resource_path_attr = {
4145 		.name = "resource_path",
4148 	.show = ipr_show_resource_path
/* Per-device sysfs attributes exported via the scsi_host_template.
 * NULL terminator elided in this extract. */
4151 static struct device_attribute *ipr_dev_attrs[] = {
4152 	&ipr_adapter_handle_attr,
4153 	&ipr_resource_path_attr,
4158  * ipr_biosparam - Return the HSC mapping
4159  * @sdev:			scsi device struct
4160  * @block_device:	block device pointer
4161  * @capacity:		capacity of the device
4162  * @parm:			Array containing returned HSC values.
4164  * This function generates the HSC parms that fdisk uses.
4165  * We want to make sure we return something that places partitions
4166  * on 4k boundaries for best performance with the IOA.
4171 static int ipr_biosparam(struct scsi_device *sdev,
4172 			 struct block_device *block_device,
4173 			 sector_t capacity, int *parm)
	/* Fixed geometry of 128 heads x 32 sectors => 4096-sector cylinders,
	 * so partition boundaries land on 4k multiples.
	 * NOTE(review): the heads/sectors assignments to parm[0]/parm[1] are
	 * on elided lines; sector_div mutates 'cylinders' in place. */
4181 	cylinders = capacity;
4182 	sector_div(cylinders, (128 * 32));
4187 	parm[2] = cylinders;
4193  * ipr_find_starget - Find target based on bus/target.
4194  * @starget:	scsi target struct
4197  *	resource entry pointer if found / NULL if not found
4199 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4201 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4202 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4203 	struct ipr_resource_entry *res;
	/* Linear scan of the used-resource list for a bus/target match.
	 * Caller must hold the host lock (list is lock-protected elsewhere).
	 * NOTE(review): the third match condition and the 'return res' /
	 * 'return NULL' lines are elided in this extract. */
4205 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4206 		if ((res->bus == starget->channel) &&
4207 		    (res->target == starget->id) &&
/* Forward declaration; initialized later in the file (outside this extract). */
4216 static struct ata_port_info sata_port_info;
4219  * ipr_target_alloc - Prepare for commands to a SCSI target
4220  * @starget:	scsi target struct
4222  * If the device is a SATA device, this function allocates an
4223  * ATA port with libata, else it does nothing.
4226  *	0 on success / non-0 on failure
4228 static int ipr_target_alloc(struct scsi_target *starget)
4230 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4231 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4232 	struct ipr_sata_port *sata_port;
4233 	struct ata_port *ap;
4234 	struct ipr_resource_entry *res;
4235 	unsigned long lock_flags;
4237 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4238 	res = ipr_find_starget(starget);
4239 	starget->hostdata = NULL;
	/* SATA target: allocate a sata_port and a libata port, then cross-
	 * link port <-> resource under the lock. NOTE(review): the -ENOMEM
	 * paths for failed kzalloc/ata_sas_port_alloc are elided. */
4241 	if (res && ipr_is_gata(res)) {
4242 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4243 		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4247 		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4249 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4250 			sata_port->ioa_cfg = ioa_cfg;
4252 			sata_port->res = res;
4254 			res->sata_port = sata_port;
4255 			ap->private_data = sata_port;
4256 			starget->hostdata = sata_port;
4262 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4268  * ipr_target_destroy - Destroy a SCSI target
4269  * @starget:	scsi target struct
4271  * If the device was a SATA device, this function frees the libata
4272  * ATA port, else it does nothing.
4275 static void ipr_target_destroy(struct scsi_target *starget)
4277 	struct ipr_sata_port *sata_port = starget->hostdata;
4278 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4279 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	/* On SIS64 adapters, release the virtual-bus target id that was
	 * reserved for this target (array / vset / physical namespaces). */
4281 	if (ioa_cfg->sis64) {
4282 		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4283 			clear_bit(starget->id, ioa_cfg->array_ids);
4284 		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4285 			clear_bit(starget->id, ioa_cfg->vset_ids);
4286 		else if (starget->channel == 0)
4287 			clear_bit(starget->id, ioa_cfg->target_ids);
	/* Free the libata port if one was allocated by ipr_target_alloc.
	 * NOTE(review): the 'if (sata_port)' guard and kfree are partially
	 * elided in this extract. */
4291 		starget->hostdata = NULL;
4292 		ata_sas_port_destroy(sata_port->ap);
4298  * ipr_find_sdev - Find device based on bus/target/lun.
4299  * @sdev:	scsi device struct
4302  *	resource entry pointer if found / NULL if not found
4304 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4306 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4307 	struct ipr_resource_entry *res;
	/* Linear scan of the used-resource list for an exact B/T/L match.
	 * Caller must hold the host lock. NOTE(review): the 'return res' /
	 * 'return NULL' lines are elided in this extract. */
4309 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4310 		if ((res->bus == sdev->channel) &&
4311 		    (res->target == sdev->id) &&
4312 		    (res->lun == sdev->lun))
4320  * ipr_slave_destroy - Unconfigure a SCSI device
4321  * @sdev:	scsi device struct
4326 static void ipr_slave_destroy(struct scsi_device *sdev)
4328 	struct ipr_resource_entry *res;
4329 	struct ipr_ioa_cfg *ioa_cfg;
4330 	unsigned long lock_flags = 0;
4332 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	/* Detach the resource entry from the sdev; for SATA devices also
	 * disable the ATA port and break the res -> sata_port link.
	 * NOTE(review): the guards around the sata_port access are elided. */
4334 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4335 	res = (struct ipr_resource_entry *) sdev->hostdata;
4338 			ata_port_disable(res->sata_port->ap);
4339 		sdev->hostdata = NULL;
4341 			res->sata_port = NULL;
4343 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4347  * ipr_slave_configure - Configure a SCSI device
4348  * @sdev:	scsi device struct
4350  * This function configures the specified scsi device.
4355 static int ipr_slave_configure(struct scsi_device *sdev)
4357 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4358 	struct ipr_resource_entry *res;
4359 	struct ata_port *ap = NULL;
4360 	unsigned long lock_flags = 0;
4361 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4363 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4364 	res = sdev->hostdata;
	/* Advanced-function DASD is presented as a RAID member; AF DASD and
	 * the IOA resource itself are hidden from upper-layer drivers. */
4366 		if (ipr_is_af_dasd_device(res))
4367 			sdev->type = TYPE_RAID;
4368 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4369 			sdev->scsi_level = 4;
4370 			sdev->no_uld_attach = 1;
	/* Volume sets get longer timeouts and a capped transfer size. */
4372 		if (ipr_is_vset_device(res)) {
4373 			blk_queue_rq_timeout(sdev->request_queue,
4374 					     IPR_VSET_RW_TIMEOUT);
4375 			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4377 		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
4378 			sdev->allow_restart = 1;
4379 		if (ipr_is_gata(res) && res->sata_port)
4380 			ap = res->sata_port->ap;
4381 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Queue depth is set outside the lock; SATA goes through libata's
	 * SAS glue, everything else uses the host default. */
4384 			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4385 			ata_sas_slave_configure(sdev, ap);
4387 			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	/* SIS64 only: log the resource path (guard lines elided). */
4389 			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4390 				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4393 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4398  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4399  * @sdev:	scsi device struct
4401  * This function initializes an ATA port so that future commands
4402  * sent through queuecommand will work.
4407 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4409 	struct ipr_sata_port *sata_port = NULL;
	/* The sata_port was stashed on the target by ipr_target_alloc.
	 * NOTE(review): the 'if (sata_port)' guard around port init and the
	 * final return are elided; on init failure the sdev is torn down. */
4413 	if (sdev->sdev_target)
4414 		sata_port = sdev->sdev_target->hostdata;
4416 		rc = ata_sas_port_init(sata_port->ap);
4418 			ipr_slave_destroy(sdev);
4425  * ipr_slave_alloc - Prepare for commands to a device.
4426  * @sdev:	scsi device struct
4428  * This function saves a pointer to the resource entry
4429  * in the scsi device struct if the device exists. We
4430  * can then use this pointer in ipr_queuecommand when
4431  * handling new commands.
4434  * 	0 on success / -ENXIO if device does not exist
4436 static int ipr_slave_alloc(struct scsi_device *sdev)
4438 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4439 	struct ipr_resource_entry *res;
4440 	unsigned long lock_flags;
4443 	sdev->hostdata = NULL;
4445 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4447 	res = ipr_find_sdev(sdev);
	/* Cache the resource entry on the sdev for the fast path; non-NACA
	 * devices need a sync-complete after error recovery. */
4452 		sdev->hostdata = res;
4453 		if (!ipr_is_naca_model(res))
4454 			res->needs_sync_complete = 1;
	/* SATA devices continue into the libata allocation path (which must
	 * run without the host lock held). */
4456 		if (ipr_is_gata(res)) {
4457 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4458 			return ipr_ata_slave_alloc(sdev);
4462 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4468  * ipr_eh_host_reset - Reset the host adapter
4469  * @scsi_cmd:	scsi command struct
4474 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
4476 	struct ipr_ioa_cfg *ioa_cfg;
	/* Core of the EH host-reset handler; caller holds the host lock. */
4480 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4482 	dev_err(&ioa_cfg->pdev->dev,
4483 		"Adapter being reset as a result of error recovery.\n");
	/* If a dump was armed, collect it as part of this reset. */
4485 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4486 		ioa_cfg->sdt_state = GET_DUMP;
4488 	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
/* EH entry point: wraps the core reset in the host lock as the midlayer
 * calls eh handlers without it held. */
4494 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4498 	spin_lock_irq(cmd->device->host->host_lock);
4499 	rc = __ipr_eh_host_reset(cmd);
4500 	spin_unlock_irq(cmd->device->host->host_lock);
4506  * ipr_device_reset - Reset the device
4507  * @ioa_cfg:	ioa config struct
4508  * @res:		resource entry struct
4510  * This function issues a device reset to the affected device.
4511  * If the device is a SCSI device, a LUN reset will be sent
4512  * to the device first. If that does not work, a target reset
4513  * will be sent. If the device is a SATA device, a PHY reset will
4517  *	0 on success / non-zero on failure
4519 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4520 			    struct ipr_resource_entry *res)
4522 	struct ipr_cmnd *ipr_cmd;
4523 	struct ipr_ioarcb *ioarcb;
4524 	struct ipr_cmd_pkt *cmd_pkt;
4525 	struct ipr_ioarcb_ata_regs *regs;
4529 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4530 	ioarcb = &ipr_cmd->ioarcb;
4531 	cmd_pkt = &ioarcb->cmd_pkt;
	/* SIS64 keeps the ATA regs in the command's ioadl union and addresses
	 * them via an offset; legacy adapters embed them in the IOARCB. */
4533 	if (ipr_cmd->ioa_cfg->sis64) {
4534 		regs = &ipr_cmd->i.ata_ioadl.regs;
4535 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4537 		regs = &ioarcb->u.add_data.u.regs;
	/* Build an IOA-level RESET DEVICE; SATA devices get a PHY reset
	 * variant with the ATA status flag set. */
4539 	ioarcb->res_handle = res->res_handle;
4540 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4541 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4542 	if (ipr_is_gata(res)) {
4543 		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4544 		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4545 		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	/* Issue synchronously, recycle the command, and (for SATA) capture
	 * the returned ATA taskfile unless the whole IOA was reset. */
4548 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4549 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4550 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4551 	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
4552 		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
4553 		       sizeof(struct ipr_ioasa_gata));
	/* Any sense key in the IOASC means the reset failed. */
4556 	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4560  * ipr_sata_reset - Reset the SATA port
4561  * @link:	SATA link to reset
4562  * @classes:	class of the attached device
4564  * This function issues a SATA phy reset to the affected ATA link.
4567  *	0 on success / non-zero on failure
4569 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4570 				unsigned long deadline)
4572 	struct ipr_sata_port *sata_port = link->ap->private_data;
4573 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4574 	struct ipr_resource_entry *res;
4575 	unsigned long lock_flags = 0;
	/* Wait out any in-flight adapter reset before touching the device
	 * (drop-lock / sleep / retake-lock loop). */
4579 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4580 	while(ioa_cfg->in_reset_reload) {
4581 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4582 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4583 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Reset the device and report its ATA class back to libata.
	 * NOTE(review): the 'if (res)' guard around these lines is elided. */
4586 	res = sata_port->res;
4588 		rc = ipr_device_reset(ioa_cfg, res);
4589 		*classes = res->ata_class;
4592 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4598  * ipr_eh_dev_reset - Reset the device
4599  * @scsi_cmd:	scsi command struct
4601  * This function issues a device reset to the affected device.
4602  * A LUN reset will be sent to the device first. If that does
4603  * not work, a target reset will be sent.
4608 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4610 	struct ipr_cmnd *ipr_cmd;
4611 	struct ipr_ioa_cfg *ioa_cfg;
4612 	struct ipr_resource_entry *res;
4613 	struct ata_port *ap;
4617 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4618 	res = scsi_cmd->device->hostdata;
4624 	 * If we are currently going through reset/reload, return failed. This will force the
4625 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	/* NOTE(review): the FAILED returns for both conditions are elided. */
4628 	if (ioa_cfg->in_reset_reload)
4630 	if (ioa_cfg->ioa_is_dead)
	/* Redirect done handlers of all commands pending against this device
	 * so EH is woken when they complete; fail queued ATA commands. */
4633 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4634 		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4635 			if (ipr_cmd->scsi_cmd)
4636 				ipr_cmd->done = ipr_scsi_eh_done;
4638 				ipr_cmd->done = ipr_sata_eh_done;
4639 			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4640 				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4641 				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4646 	res->resetting_device = 1;
4647 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
	/* SATA: hand the reset to libata's standard error handler, dropping
	 * the host lock across the call; afterwards any command still
	 * pending against this handle means the reset failed (elided). */
4649 	if (ipr_is_gata(res) && res->sata_port) {
4650 		ap = res->sata_port->ap;
4651 		spin_unlock_irq(scsi_cmd->device->host->host_lock);
4652 		ata_std_error_handler(ap);
4653 		spin_lock_irq(scsi_cmd->device->host->host_lock);
4655 		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4656 			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
	/* Non-SATA: issue the IOA device reset directly. */
4662 		rc = ipr_device_reset(ioa_cfg, res);
4663 	res->resetting_device = 0;
4666 	return (rc ? FAILED : SUCCESS);
/* EH entry point: wraps the core device reset in the host lock. */
4669 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4673 	spin_lock_irq(cmd->device->host->host_lock);
4674 	rc = __ipr_eh_dev_reset(cmd);
4675 	spin_unlock_irq(cmd->device->host->host_lock);
4681  * ipr_bus_reset_done - Op done function for bus reset.
4682  * @ipr_cmd:	ipr command struct
4684  * This function is the op done function for a bus reset
4689 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4691 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4692 	struct ipr_resource_entry *res;
	/* Legacy (non-SIS64) adapters: tell the midlayer which bus was reset
	 * by matching this command's resource handle. */
4695 	if (!ioa_cfg->sis64)
4696 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4697 			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4698 				scsi_report_bus_reset(ioa_cfg->host, res->bus);
4704 	 * If abort has not completed, indicate the reset has, else call the
4705 	 * abort's done function to wake the sleeping eh thread
	/* 'sibling' links this bus reset to the abort that spawned it
	 * (see ipr_abort_timeout). NOTE(review): the 'else' between these
	 * two statements is elided in this extract. */
4707 	if (ipr_cmd->sibling->sibling)
4708 		ipr_cmd->sibling->sibling = NULL;
4710 		ipr_cmd->sibling->done(ipr_cmd->sibling);
4712 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4717  * ipr_abort_timeout - An abort task has timed out
4718  * @ipr_cmd:	ipr command struct
4720  * This function handles when an abort task times out. If this
4721  * happens we issue a bus reset since we have resources tied
4722  * up that must be freed before returning to the midlayer.
4727 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4729 	struct ipr_cmnd *reset_cmd;
4730 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4731 	struct ipr_cmd_pkt *cmd_pkt;
4732 	unsigned long lock_flags = 0;
	/* If the abort completed meanwhile or an adapter reset is running,
	 * there is nothing to escalate. */
4735 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4736 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4737 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Escalate to a bus reset; cross-link abort and reset via 'sibling'
	 * so ipr_bus_reset_done can wake the sleeping EH thread. */
4741 	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4742 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4743 	ipr_cmd->sibling = reset_cmd;
4744 	reset_cmd->sibling = ipr_cmd;
4745 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4746 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4747 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4748 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4749 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4751 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4752 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4757 * ipr_cancel_op - Cancel specified op
4758 * @scsi_cmd: scsi command struct
4760 * This function cancels specified op.
/* Return value: SUCCESS / FAILED (midlayer eh codes) */
4765 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4767 struct ipr_cmnd *ipr_cmd;
4768 struct ipr_ioa_cfg *ioa_cfg;
4769 struct ipr_resource_entry *res;
4770 struct ipr_cmd_pkt *cmd_pkt;
4775 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4776 res = scsi_cmd->device->hostdata;
4778 /* If we are currently going through reset/reload, return failed.
4779 * This will force the mid-layer to call ipr_eh_host_reset,
4780 * which will then go to sleep and wait for the reset to complete
4782 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
/* Aborts are only supported on generic SCSI (gscsi) resources */
4784 if (!res || !ipr_is_gscsi(res))
/* Redirect the pending command's done hook so the eh thread is woken
 * when the op completes (or is cancelled).
 * NOTE(review): lines that record whether the op was actually found in
 * pending_q appear to be missing from this extraction — verify against
 * the upstream source. */
4787 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4788 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4789 ipr_cmd->done = ipr_scsi_eh_done;
/* Build and send a Cancel All Requests to the device, blocking until
 * it completes; ipr_abort_timeout escalates to a bus reset on timeout. */
4798 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4799 ipr_cmd->ioarcb.res_handle = res->res_handle;
4800 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4801 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4802 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4803 ipr_cmd->u.sdev = scsi_cmd->device;
4805 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4807 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4808 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4811 * If the abort task timed out and we sent a bus reset, we will get
4812 * one the following responses to the abort
4814 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4819 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* NACA-model devices manage their own sync; others need a sync complete */
4820 if (!ipr_is_naca_model(res))
4821 res->needs_sync_complete = 1;
/* Any sense key in the IOASC means the cancel itself failed */
4824 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4828 * ipr_eh_abort - Abort a single op
4829 * @scsi_cmd: scsi command struct
4834 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4836 unsigned long flags;
4841 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4842 rc = ipr_cancel_op(scsi_cmd);
4843 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4850 * ipr_handle_other_interrupt - Handle "other" interrupts
4851 * @ioa_cfg: ioa config struct
4852 * @int_reg: interrupt register
4855 * IRQ_NONE / IRQ_HANDLED
4857 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4858 volatile u32 int_reg)
4860 irqreturn_t rc = IRQ_HANDLED;
4862 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4863 /* Mask the interrupt */
4864 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4866 /* Clear the interrupt */
4867 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4868 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4870 list_del(&ioa_cfg->reset_cmd->queue);
4871 del_timer(&ioa_cfg->reset_cmd->timer);
4872 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4874 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4875 ioa_cfg->ioa_unit_checked = 1;
4877 dev_err(&ioa_cfg->pdev->dev,
4878 "Permanent IOA failure. 0x%08X\n", int_reg);
4880 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4881 ioa_cfg->sdt_state = GET_DUMP;
4883 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4884 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4891 * ipr_isr_eh - Interrupt service routine error handler
4892 * @ioa_cfg: ioa config struct
4893 * @msg: message to log
4898 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4900 ioa_cfg->errors_logged++;
4901 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4903 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4904 ioa_cfg->sdt_state = GET_DUMP;
4906 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4910 * ipr_isr - Interrupt service routine
4912 * @devp: pointer to ioa config struct
4915 * IRQ_NONE / IRQ_HANDLED
4917 static irqreturn_t ipr_isr(int irq, void *devp)
4919 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4920 unsigned long lock_flags = 0;
4921 volatile u32 int_reg, int_mask_reg;
4925 struct ipr_cmnd *ipr_cmd;
4926 irqreturn_t rc = IRQ_NONE;
4928 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4930 /* If interrupts are disabled, ignore the interrupt */
4931 if (!ioa_cfg->allow_interrupts) {
4932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4936 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4937 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4939 /* If an interrupt on the adapter did not occur, ignore it */
4940 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4941 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Drain the Host Request/Response Queue: entries belong to the host
 * while their toggle bit matches ioa_cfg->toggle_bit. */
4948 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4949 ioa_cfg->toggle_bit) {
4951 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4952 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
/* A handle outside the command-block table means the IOA is confused;
 * reset it rather than index out of bounds. */
4954 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4955 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
4956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4960 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4962 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4964 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
/* Complete the command: dequeue, stop its timeout, invoke done hook */
4966 list_del(&ipr_cmd->queue);
4967 del_timer(&ipr_cmd->timer);
4968 ipr_cmd->done(ipr_cmd);
/* Advance the HRRQ cursor, wrapping and flipping the toggle bit at the
 * end of the ring. */
4972 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4973 ioa_cfg->hrrq_curr++;
4975 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4976 ioa_cfg->toggle_bit ^= 1u;
/* If we completed anything, acknowledge the HRRQ-updated interrupt and
 * re-check; bounded by IPR_MAX_HRRQ_RETRIES. */
4980 if (ipr_cmd != NULL) {
4981 /* Clear the PCI interrupt */
4983 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4984 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4985 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4986 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
/* Interrupt would not clear: escalate to adapter reset */
4988 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4989 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4990 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* No HRRQ work claimed the interrupt: hand it to the "other" handler */
4998 if (unlikely(rc == IRQ_NONE))
4999 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5001 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5006 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5007 * @ioa_cfg: ioa config struct
5008 * @ipr_cmd: ipr command struct
5011 * 0 on success / -1 on failure
5013 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5014 struct ipr_cmnd *ipr_cmd)
5017 struct scatterlist *sg;
5019 u32 ioadl_flags = 0;
5020 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5021 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5022 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5024 length = scsi_bufflen(scsi_cmd);
5028 nseg = scsi_dma_map(scsi_cmd);
5030 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5034 ipr_cmd->dma_use_sg = nseg;
5036 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5037 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5038 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5039 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5040 ioadl_flags = IPR_IOADL_FLAGS_READ;
5042 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5043 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5044 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5045 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5048 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5053 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5054 * @ioa_cfg: ioa config struct
5055 * @ipr_cmd: ipr command struct
5058 * 0 on success / -1 on failure
5060 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5061 struct ipr_cmnd *ipr_cmd)
5064 struct scatterlist *sg;
5066 u32 ioadl_flags = 0;
5067 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5068 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5069 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5071 length = scsi_bufflen(scsi_cmd);
5075 nseg = scsi_dma_map(scsi_cmd);
5077 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5081 ipr_cmd->dma_use_sg = nseg;
5083 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5084 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5085 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5086 ioarcb->data_transfer_length = cpu_to_be32(length);
5088 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5089 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5090 ioadl_flags = IPR_IOADL_FLAGS_READ;
5091 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5092 ioarcb->read_ioadl_len =
5093 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5096 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5097 ioadl = ioarcb->u.add_data.u.ioadl;
5098 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5099 offsetof(struct ipr_ioarcb, u.add_data));
5100 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5103 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5104 ioadl[i].flags_and_data_len =
5105 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5106 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5109 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5114 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5115 * @scsi_cmd: scsi command struct
5120 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5123 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5125 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5127 case MSG_SIMPLE_TAG:
5128 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5131 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5133 case MSG_ORDERED_TAG:
5134 rc = IPR_FLAGS_LO_ORDERED_TASK;
5143 * ipr_erp_done - Process completion of ERP for a device
5144 * @ipr_cmd: ipr command struct
5146 * This function copies the sense buffer into the scsi_cmd
5147 * struct and pushes the scsi_done function.
5152 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5154 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5155 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5156 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5157 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5159 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5160 scsi_cmd->result |= (DID_ERROR << 16);
5161 scmd_printk(KERN_ERR, scsi_cmd,
5162 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5164 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5165 SCSI_SENSE_BUFFERSIZE);
5169 if (!ipr_is_naca_model(res))
5170 res->needs_sync_complete = 1;
5173 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5174 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5175 scsi_cmd->scsi_done(scsi_cmd);
5179 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5180 * @ipr_cmd: ipr command struct
5185 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5187 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5188 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5189 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5191 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5192 ioarcb->data_transfer_length = 0;
5193 ioarcb->read_data_transfer_length = 0;
5194 ioarcb->ioadl_len = 0;
5195 ioarcb->read_ioadl_len = 0;
5197 ioasa->residual_data_len = 0;
5199 if (ipr_cmd->ioa_cfg->sis64)
5200 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5201 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5203 ioarcb->write_ioadl_addr =
5204 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5205 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5210 * ipr_erp_request_sense - Send request sense to a device
5211 * @ipr_cmd: ipr command struct
5213 * This function sends a request sense to a device as a result
5214 * of a check condition.
5219 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5221 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5222 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5224 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5225 ipr_erp_done(ipr_cmd);
5229 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5231 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5232 cmd_pkt->cdb[0] = REQUEST_SENSE;
5233 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5234 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5235 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5236 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5238 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5239 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5241 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5242 IPR_REQUEST_SENSE_TIMEOUT * 2);
5246 * ipr_erp_cancel_all - Send cancel all to a device
5247 * @ipr_cmd: ipr command struct
5249 * This function sends a cancel all to a device to clear the
5250 * queue. If we are running TCQ on the device, QERR is set to 1,
5251 * which means all outstanding ops have been dropped on the floor.
5252 * Cancel all will return them to us.
5257 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5259 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5260 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5261 struct ipr_cmd_pkt *cmd_pkt;
5265 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5267 if (!scsi_get_tag_type(scsi_cmd->device)) {
5268 ipr_erp_request_sense(ipr_cmd);
5272 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5273 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5274 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5276 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5277 IPR_CANCEL_ALL_TIMEOUT);
5281 * ipr_dump_ioasa - Dump contents of IOASA
5282 * @ioa_cfg: ioa config struct
5283 * @ipr_cmd: ipr command struct
5284 * @res: resource entry struct
5286 * This function is invoked by the interrupt handler when ops
5287 * fail. It will log the IOASA if appropriate. Only called
5293 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5294 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5298 u32 ioasc, fd_ioasc;
5299 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5300 __be32 *ioasa_data = (__be32 *)ioasa;
5303 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
5304 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
5309 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5312 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5313 error_index = ipr_get_error(fd_ioasc);
5315 error_index = ipr_get_error(ioasc);
5317 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5318 /* Don't log an error if the IOA already logged one */
5319 if (ioasa->ilid != 0)
5322 if (!ipr_is_gscsi(res))
5325 if (ipr_error_table[error_index].log_ioasa == 0)
5329 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5331 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
5332 data_len = sizeof(struct ipr_ioasa);
5334 data_len = be16_to_cpu(ioasa->ret_stat_len);
5336 ipr_err("IOASA Dump:\n");
5338 for (i = 0; i < data_len / 4; i += 4) {
5339 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5340 be32_to_cpu(ioasa_data[i]),
5341 be32_to_cpu(ioasa_data[i+1]),
5342 be32_to_cpu(ioasa_data[i+2]),
5343 be32_to_cpu(ioasa_data[i+3]));
5348 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5350 * @sense_buf: sense data buffer
/* Builds fixed-format (0x70) or descriptor-format (0x72) sense data in
 * the scsi_cmnd's sense buffer from the IOA status area. */
5355 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5358 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5359 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5360 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5361 u32 ioasc = be32_to_cpu(ioasa->ioasc);
5363 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-generated IOASCs carry no device sense data */
5365 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5368 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* vset device with a 64-bit failing LBA needs descriptor-format (0x72)
 * sense with an information descriptor to hold all 8 LBA bytes. */
5370 if (ipr_is_vset_device(res) &&
5371 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5372 ioasa->u.vset.failing_lba_hi != 0) {
5373 sense_buf[0] = 0x72;
5374 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5375 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5376 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
/* Information descriptor: length 0x0A, VALID bit set */
5380 sense_buf[9] = 0x0A;
5381 sense_buf[10] = 0x80;
5383 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
/* High 32 bits of the failing LBA, big-endian byte order */
5385 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5386 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5387 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5388 sense_buf[15] = failing_lba & 0x000000ff;
5390 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
/* Low 32 bits of the failing LBA */
5392 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5393 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5394 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5395 sense_buf[19] = failing_lba & 0x000000ff;
/* Otherwise: fixed-format (0x70) sense */
5397 sense_buf[0] = 0x70;
5398 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5399 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5400 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5402 /* Illegal request */
5403 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5404 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5405 sense_buf[7] = 10; /* additional length */
5407 /* IOARCB was in error */
5408 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5409 sense_buf[15] = 0xC0;
5410 else /* Parameter data was invalid */
5411 sense_buf[15] = 0x80;
/* Field pointer bytes (sense key specific information) */
5414 ((IPR_FIELD_POINTER_MASK &
5415 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
5417 (IPR_FIELD_POINTER_MASK &
5418 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
/* Medium error: place the 32-bit failing LBA in the information field */
5420 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5421 if (ipr_is_vset_device(res))
5422 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5424 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5426 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5427 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5428 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5429 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5430 sense_buf[6] = failing_lba & 0x000000ff;
5433 sense_buf[7] = 6; /* additional length */
5439 * ipr_get_autosense - Copy autosense data to sense buffer
5440 * @ipr_cmd: ipr command struct
5442 * This function copies the autosense buffer to the buffer
5443 * in the scsi_cmd, if there is autosense available.
5446 * 1 if autosense was available / 0 if not
5448 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5450 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5452 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5455 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5456 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5457 SCSI_SENSE_BUFFERSIZE));
5462 * ipr_erp_start - Process an error response for a SCSI op
5463 * @ioa_cfg: ioa config struct
5464 * @ipr_cmd: ipr command struct
5466 * This function determines whether or not to initiate ERP
5467 * on the affected device.
/* Return value: nothing */
5472 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5473 struct ipr_cmnd *ipr_cmd)
5475 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5476 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5477 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5478 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
/* No resource entry: finish via the eh done path */
5481 ipr_scsi_eh_done(ipr_cmd);
/* For non-gscsi devices, synthesize sense data from the IOASA */
5485 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5486 ipr_gen_sense(ipr_cmd);
5488 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5490 switch (masked_ioasc) {
5491 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5492 if (ipr_is_naca_model(res))
5493 scsi_cmd->result |= (DID_ABORT << 16);
5495 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5497 case IPR_IOASC_IR_RESOURCE_HANDLE:
5498 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5499 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5501 case IPR_IOASC_HW_SEL_TIMEOUT:
5502 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5503 if (!ipr_is_naca_model(res))
5504 res->needs_sync_complete = 1;
5506 case IPR_IOASC_SYNC_REQUIRED:
5508 res->needs_sync_complete = 1;
5509 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5511 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5512 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5513 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5515 case IPR_IOASC_BUS_WAS_RESET:
5516 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5518 * Report the bus reset and ask for a retry. The device
5519 * will give CC/UA the next command.
5521 if (!res->resetting_device)
5522 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5523 scsi_cmd->result |= (DID_ERROR << 16);
5524 if (!ipr_is_naca_model(res))
5525 res->needs_sync_complete = 1;
/* Check condition: try autosense first, else start full ERP
 * (cancel all + request sense) for non-NACA devices */
5527 case IPR_IOASC_HW_DEV_BUS_STATUS:
5528 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5529 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5530 if (!ipr_get_autosense(ipr_cmd)) {
5531 if (!ipr_is_naca_model(res)) {
5532 ipr_erp_cancel_all(ipr_cmd);
5537 if (!ipr_is_naca_model(res))
5538 res->needs_sync_complete = 1;
5540 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
/* Default: anything worse than a recovered error is DID_ERROR */
5543 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5544 scsi_cmd->result |= (DID_ERROR << 16);
5545 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5546 res->needs_sync_complete = 1;
/* Common completion: unmap, free the command block, notify midlayer */
5550 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5551 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5552 scsi_cmd->scsi_done(scsi_cmd);
5556 * ipr_scsi_done - mid-layer done function
5557 * @ipr_cmd: ipr command struct
5559 * This function is invoked by the interrupt handler for
5560 * ops generated by the SCSI mid-layer
5565 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5567 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5568 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5569 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5571 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
5573 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5574 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5575 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5576 scsi_cmd->scsi_done(scsi_cmd);
5578 ipr_erp_start(ioa_cfg, ipr_cmd);
5582 * ipr_queuecommand - Queue a mid-layer request
5583 * @scsi_cmd: scsi command struct
5584 * @done: done function
5586 * This function queues a request generated by the mid-layer.
/* Return value:
 *	0 on success
 */
5590 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5591 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5593 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5594 void (*done) (struct scsi_cmnd *))
5596 struct ipr_ioa_cfg *ioa_cfg;
5597 struct ipr_resource_entry *res;
5598 struct ipr_ioarcb *ioarcb;
5599 struct ipr_cmnd *ipr_cmd;
5602 scsi_cmd->scsi_done = done;
5603 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5604 res = scsi_cmd->device->hostdata;
5605 scsi_cmd->result = (DID_OK << 16);
5608 * We are currently blocking all devices due to a host reset
5609 * We have told the host to stop giving us new requests, but
5610 * ERP ops don't count. FIXME
5612 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5613 return SCSI_MLQUEUE_HOST_BUSY;
5616 * FIXME - Create scsi_set_host_offline interface
5617 * and the ioa_is_dead check can be removed
5619 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
/* Dead adapter or unknown device: fail the command immediately */
5620 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5621 scsi_cmd->result = (DID_NO_CONNECT << 16);
5622 scsi_cmd->scsi_done(scsi_cmd);
/* SATA devices are routed through libata */
5626 if (ipr_is_gata(res) && res->sata_port)
5627 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5629 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5630 ioarcb = &ipr_cmd->ioarcb;
5631 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5633 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5634 ipr_cmd->scsi_cmd = scsi_cmd;
5635 ioarcb->res_handle = res->res_handle;
5636 ipr_cmd->done = ipr_scsi_done;
5637 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5639 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5640 if (scsi_cmd->underflow == 0)
5641 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
/* Consume any pending sync-complete requirement for this resource */
5643 if (res->needs_sync_complete) {
5644 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5645 res->needs_sync_complete = 0;
5648 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5649 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5650 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5651 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
/* Vendor-specific CDBs (>= 0xC0) are adapter commands, not SCSI CDBs */
5654 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5655 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5656 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* Build the scatter/gather list in the format the adapter expects */
5658 if (likely(rc == 0)) {
5660 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5662 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5665 if (likely(rc == 0)) {
5667 ipr_send_command(ipr_cmd);
/* S/G build failed: return the command block and ask for a retry */
5669 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5670 return SCSI_MLQUEUE_HOST_BUSY;
5677 * ipr_ioctl - IOCTL handler
5678 * @sdev: scsi device struct
5683 * 0 on success / other on failure
5685 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5687 struct ipr_resource_entry *res;
5689 res = (struct ipr_resource_entry *)sdev->hostdata;
5690 if (res && ipr_is_gata(res)) {
5691 if (cmd == HDIO_GET_IDENTITY)
5693 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5700 * ipr_info - Get information about the card/driver
5701 * @scsi_host: scsi host struct
5704 * pointer to buffer with description string
5706 static const char * ipr_ioa_info(struct Scsi_Host *host)
5708 static char buffer[512];
5709 struct ipr_ioa_cfg *ioa_cfg;
5710 unsigned long lock_flags = 0;
5712 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5714 spin_lock_irqsave(host->host_lock, lock_flags);
5715 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5716 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI midlayer host template: wires this driver's entry points,
 * error handlers and queue limits into the SCSI core. */
5721 static struct scsi_host_template driver_template = {
5722 .module = THIS_MODULE,
5724 .info = ipr_ioa_info,
5726 .queuecommand = ipr_queuecommand,
5727 .eh_abort_handler = ipr_eh_abort,
5728 .eh_device_reset_handler = ipr_eh_dev_reset,
5729 .eh_host_reset_handler = ipr_eh_host_reset,
5730 .slave_alloc = ipr_slave_alloc,
5731 .slave_configure = ipr_slave_configure,
5732 .slave_destroy = ipr_slave_destroy,
5733 .target_alloc = ipr_target_alloc,
5734 .target_destroy = ipr_target_destroy,
5735 .change_queue_depth = ipr_change_queue_depth,
5736 .change_queue_type = ipr_change_queue_type,
5737 .bios_param = ipr_biosparam,
5738 .can_queue = IPR_MAX_COMMANDS,
5740 .sg_tablesize = IPR_MAX_SGLIST,
5741 .max_sectors = IPR_IOA_MAX_SECTORS,
5742 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5743 .use_clustering = ENABLE_CLUSTERING,
5744 .shost_attrs = ipr_ioa_attrs,
5745 .sdev_attrs = ipr_dev_attrs,
5746 .proc_name = IPR_NAME
5750 * ipr_ata_phy_reset - libata phy_reset handler
5751 * @ap: ata port to reset
5754 static void ipr_ata_phy_reset(struct ata_port *ap)
5756 unsigned long flags;
5757 struct ipr_sata_port *sata_port = ap->private_data;
5758 struct ipr_resource_entry *res = sata_port->res;
5759 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5763 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5764 while(ioa_cfg->in_reset_reload) {
5765 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5766 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5767 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5770 if (!ioa_cfg->allow_cmds)
5773 rc = ipr_device_reset(ioa_cfg, res);
5776 ata_port_disable(ap);
5780 ap->link.device[0].class = res->ata_class;
5781 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5782 ata_port_disable(ap);
5785 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5790 * ipr_ata_post_internal - Cleanup after an internal command
5791 * @qc: ATA queued command
5796 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5798 struct ipr_sata_port *sata_port = qc->ap->private_data;
5799 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5800 struct ipr_cmnd *ipr_cmd;
5801 unsigned long flags;
5803 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5804 while(ioa_cfg->in_reset_reload) {
5805 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5806 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5807 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5810 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5811 if (ipr_cmd->qc == qc) {
5812 ipr_device_reset(ioa_cfg, sata_port->res);
5816 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5820 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5821 * @regs: destination
5822 * @tf: source ATA taskfile
5827 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5828 struct ata_taskfile *tf)
5830 regs->feature = tf->feature;
5831 regs->nsect = tf->nsect;
5832 regs->lbal = tf->lbal;
5833 regs->lbam = tf->lbam;
5834 regs->lbah = tf->lbah;
5835 regs->device = tf->device;
5836 regs->command = tf->command;
5837 regs->hob_feature = tf->hob_feature;
5838 regs->hob_nsect = tf->hob_nsect;
5839 regs->hob_lbal = tf->hob_lbal;
5840 regs->hob_lbam = tf->hob_lbam;
5841 regs->hob_lbah = tf->hob_lbah;
5842 regs->ctl = tf->ctl;
5846 * ipr_sata_done - done function for SATA commands
5847 * @ipr_cmd: ipr command struct
5849 * This function is invoked by the interrupt handler for
5850 * ops generated by the SCSI mid-layer to SATA devices
5855 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5857 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5858 struct ata_queued_cmd *qc = ipr_cmd->qc;
5859 struct ipr_sata_port *sata_port = qc->ap->private_data;
5860 struct ipr_resource_entry *res = sata_port->res;
5861 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5863 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5864 sizeof(struct ipr_ioasa_gata));
5865 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5867 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5868 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5870 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5871 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5873 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5874 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5875 ata_qc_complete(qc);
5879 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5880 * @ipr_cmd: ipr command struct
5881 * @qc: ATA queued command
5884 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5885 struct ata_queued_cmd *qc)
5887 u32 ioadl_flags = 0;
5888 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5889 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5890 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5891 int len = qc->nbytes;
5892 struct scatterlist *sg;
5894 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5899 if (qc->dma_dir == DMA_TO_DEVICE) {
5900 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5901 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5902 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5903 ioadl_flags = IPR_IOADL_FLAGS_READ;
5905 ioarcb->data_transfer_length = cpu_to_be32(len);
5907 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5908 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5909 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5911 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5912 ioadl64->flags = cpu_to_be32(ioadl_flags);
5913 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5914 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5916 last_ioadl64 = ioadl64;
5920 if (likely(last_ioadl64))
5921 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5925 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5926 * @ipr_cmd: ipr command struct
5927 * @qc: ATA queued command
5930 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5931 struct ata_queued_cmd *qc)
5933 u32 ioadl_flags = 0;
5934 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5935 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5936 struct ipr_ioadl_desc *last_ioadl = NULL;
5937 int len = qc->nbytes;
5938 struct scatterlist *sg;
5944 if (qc->dma_dir == DMA_TO_DEVICE) {
5945 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5946 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5947 ioarcb->data_transfer_length = cpu_to_be32(len);
5949 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5950 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5951 ioadl_flags = IPR_IOADL_FLAGS_READ;
5952 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5953 ioarcb->read_ioadl_len =
5954 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5957 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5958 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5959 ioadl->address = cpu_to_be32(sg_dma_address(sg));
5965 if (likely(last_ioadl))
5966 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
/* libata ->qc_issue hook: wraps a SATA qc in an ATA-passthru IOARCB and
 * sends it to the adapter.  Returns 0 on issue or an AC_ERR_* code. */
/* NOTE(review): lines are elided in this extract (else branch of the
 * sis64 test, switch-case breaks/fallthrough structure, closing
 * braces) -- verify the protocol switch against the full source. */
5970 * ipr_qc_issue - Issue a SATA qc to a device
5971 * @qc: queued command
5976 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5978 struct ata_port *ap = qc->ap;
5979 struct ipr_sata_port *sata_port = ap->private_data;
5980 struct ipr_resource_entry *res = sata_port->res;
5981 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5982 struct ipr_cmnd *ipr_cmd;
5983 struct ipr_ioarcb *ioarcb;
5984 struct ipr_ioarcb_ata_regs *regs;
/* Refuse new commands while the adapter is resetting or dead. */
5986 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5987 return AC_ERR_SYSTEM;
5989 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5990 ioarcb = &ipr_cmd->ioarcb;
/* sis64 adapters keep the ATA register block out-of-line after the
 * IOARCB; older adapters embed it in the IOARCB's add_data union. */
5992 if (ioa_cfg->sis64) {
5993 regs = &ipr_cmd->i.ata_ioadl.regs;
5994 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5996 regs = &ioarcb->u.add_data.u.regs;
5998 memset(regs, 0, sizeof(*regs));
5999 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6001 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6003 ipr_cmd->done = ipr_sata_done;
6004 ipr_cmd->ioarcb.res_handle = res->res_handle;
6005 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6006 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6007 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6008 ipr_cmd->dma_use_sg = qc->n_elem;
/* Build the scatter/gather list in the format this adapter expects. */
6011 ipr_build_ata_ioadl64(ipr_cmd, qc);
6013 ipr_build_ata_ioadl(ipr_cmd, qc);
6015 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6016 ipr_copy_sata_tf(regs, &qc->tf);
6017 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6018 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
/* Map the ATA protocol onto adapter flag bits; unknown protocols are
 * rejected with AC_ERR_INVALID. */
6020 switch (qc->tf.protocol) {
6021 case ATA_PROT_NODATA:
6026 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6029 case ATAPI_PROT_PIO:
6030 case ATAPI_PROT_NODATA:
6031 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6034 case ATAPI_PROT_DMA:
6035 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6036 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6041 return AC_ERR_INVALID;
6046 ipr_send_command(ipr_cmd);
/* libata ->qc_fill_rtf hook: copies the adapter-reported GATA status
 * block into the qc's result taskfile. */
/* NOTE(review): the lbal/lbam/lbah copies and the return statement are
 * elided in this extract. */
6052 * ipr_qc_fill_rtf - Read result TF
6053 * @qc: ATA queued command
6058 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6060 struct ipr_sata_port *sata_port = qc->ap->private_data;
6061 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6062 struct ata_taskfile *tf = &qc->result_tf;
6064 tf->feature = g->error;
6065 tf->nsect = g->nsect;
6069 tf->device = g->device;
6070 tf->command = g->status;
6071 tf->hob_nsect = g->hob_nsect;
6072 tf->hob_lbal = g->hob_lbal;
6073 tf->hob_lbam = g->hob_lbam;
6074 tf->hob_lbah = g->hob_lbah;
6075 tf->ctl = g->alt_status;
/* libata port operations vector for SATA devices behind this adapter.
 * qc_prep is a no-op because the IOADL is built at issue time. */
6080 static struct ata_port_operations ipr_sata_ops = {
6081 .phy_reset = ipr_ata_phy_reset,
6082 .hardreset = ipr_sata_reset,
6083 .post_internal_cmd = ipr_ata_post_internal,
6084 .qc_prep = ata_noop_qc_prep,
6085 .qc_issue = ipr_qc_issue,
6086 .qc_fill_rtf = ipr_qc_fill_rtf,
6087 .port_start = ata_sas_port_start,
6088 .port_stop = ata_sas_port_stop
/* Port capabilities advertised to libata for attached SATA devices. */
6091 static struct ata_port_info sata_port_info = {
6092 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6093 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6094 .pio_mask = 0x10, /* pio4 */
6096 .udma_mask = 0x7f, /* udma0-6 */
6097 .port_ops = &ipr_sata_ops
/* On pSeries, early (revision < 4) Gemstone 0x5702 adapters are
 * blacklisted on certain processor models.  On other architectures
 * ipr_invalid_adapter() is compiled out to a constant 0 ("supported").
 * NOTE(review): the PVR table entries, the #else, and the #endif are
 * elided in this extract. */
6100 #ifdef CONFIG_PPC_PSERIES
6101 static const u16 ipr_blocked_processors[] = {
6113 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6114 * @ioa_cfg: ioa cfg struct
6116 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6117 * certain pSeries hardware. This function determines if the given
6118 * adapter is in one of these configurations or not.
6121 * 1 if adapter is not supported / 0 if adapter is supported
6123 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6127 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6128 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6129 if (__is_processor(ipr_blocked_processors[i]))
6136 #define ipr_invalid_adapter(ioa_cfg) 0
6140 * ipr_ioa_bringdown_done - IOA bring down completion.
6141 * @ipr_cmd: ipr command struct
6143 * This function processes the completion of an adapter bring down.
6144 * It wakes any reset sleepers.
/* Runs with the host lock held (job-step context); the lock is dropped
 * only around scsi_unblock_requests(), which may sleep/re-enter. */
6149 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6151 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6154 ioa_cfg->in_reset_reload = 0;
6155 ioa_cfg->reset_retries = 0;
/* Return the command block before waking waiters so they see a quiesced
 * adapter with the command available again. */
6156 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6157 wake_up_all(&ioa_cfg->reset_wait_q);
6159 spin_unlock_irq(ioa_cfg->host->host_lock);
6160 scsi_unblock_requests(ioa_cfg->host);
6161 spin_lock_irq(ioa_cfg->host->host_lock);
6164 return IPR_RC_JOB_RETURN;
6168 * ipr_ioa_reset_done - IOA reset completion.
6169 * @ipr_cmd: ipr command struct
6171 * This function processes the completion of an adapter reset.
6172 * It schedules any necessary mid-layer add/removes and
6173 * wakes any reset sleepers.
6178 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6180 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6181 struct ipr_resource_entry *res;
6182 struct ipr_hostrcb *hostrcb, *temp;
/* Adapter is operational again: allow new commands and arm the runtime
 * reset doorbell for any subsequent resets. */
6186 ioa_cfg->in_reset_reload = 0;
6187 ioa_cfg->allow_cmds = 1;
6188 ioa_cfg->reset_cmd = NULL;
6189 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
/* Kick the worker if any resource needs adding to / removing from the
 * SCSI mid-layer as a result of the reset. */
6191 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6192 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6197 schedule_work(&ioa_cfg->work_q);
/* Re-post HCAM buffers: the first IPR_NUM_LOG_HCAMS as error-log
 * HCAMs, the remainder as config-change HCAMs. */
6199 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6200 list_del(&hostrcb->queue);
6201 if (i++ < IPR_NUM_LOG_HCAMS)
6202 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6204 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6207 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6208 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6210 ioa_cfg->reset_retries = 0;
6211 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6212 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the host lock around scsi_unblock_requests(). */
6214 spin_unlock(ioa_cfg->host->host_lock);
6215 scsi_unblock_requests(ioa_cfg->host);
6216 spin_lock(ioa_cfg->host->host_lock);
/* A nested reset may have started while unlocked; re-block if so. */
6218 if (!ioa_cfg->allow_cmds)
6219 scsi_block_requests(ioa_cfg->host);
6222 return IPR_RC_JOB_RETURN;
6226 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6227 * @supported_dev: supported device struct
6228 * @vpids: vendor product id struct
/* Zeroes the buffer, copies in the device's vendor/product IDs, and
 * fills the single-record header fields. */
6233 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6234 struct ipr_std_inq_vpids *vpids)
6236 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6237 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6238 supported_dev->num_records = 1;
6239 supported_dev->data_length =
6240 cpu_to_be16(sizeof(struct ipr_supported_device));
6241 supported_dev->reserved = 0;
6245 * ipr_set_supported_devs - Send Set Supported Devices for a device
6246 * @ipr_cmd: ipr command struct
6248 * This function sends a Set Supported Devices to the adapter
6251 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/* Iterates one SCSI disk per invocation: ipr_cmd->u.res tracks the
 * cursor, and job_step is re-pointed back here so the job engine calls
 * again for the next disk.  When the list is exhausted the job falls
 * through to ipr_ioa_reset_done. */
6253 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6255 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6256 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6257 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6258 struct ipr_resource_entry *res = ipr_cmd->u.res;
6260 ipr_cmd->job_step = ipr_ioa_reset_done;
6262 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6263 if (!ipr_is_scsi_disk(res))
6266 ipr_cmd->u.res = res;
6267 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6269 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6270 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6271 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6273 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6274 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
/* CDB bytes 7..8 carry the big-endian transfer length. */
6275 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6276 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6278 ipr_init_ioadl(ipr_cmd,
6279 ioa_cfg->vpd_cbs_dma +
6280 offsetof(struct ipr_misc_cbs, supp_dev),
6281 sizeof(struct ipr_supported_device),
6282 IPR_IOADL_FLAGS_WRITE_LAST);
6284 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6285 IPR_SET_SUP_DEVICE_TIMEOUT);
6287 if (!ioa_cfg->sis64)
6288 ipr_cmd->job_step = ipr_set_supported_devs;
6289 return IPR_RC_JOB_RETURN;
6292 return IPR_RC_JOB_CONTINUE;
6296 * ipr_setup_write_cache - Disable write cache if needed
6297 * @ipr_cmd: ipr command struct
6299 * This function sets up adapters write cache to desired setting
6302 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/* If the cache should stay enabled this step is a pass-through
 * (JOB_CONTINUE); otherwise it issues an IOA shutdown-prepare command
 * to flush/disable the write cache before continuing. */
6304 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
6306 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Seed the next step's resource cursor at the head of used_res_q. */
6308 ipr_cmd->job_step = ipr_set_supported_devs;
6309 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6310 struct ipr_resource_entry, queue);
6312 if (ioa_cfg->cache_state != CACHE_DISABLED)
6313 return IPR_RC_JOB_CONTINUE;
6315 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6316 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6317 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6318 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
6320 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6322 return IPR_RC_JOB_RETURN;
6326 * ipr_get_mode_page - Locate specified mode page
6327 * @mode_pages: mode page buffer
6328 * @page_code: page code to find
6329 * @len: minimum required length for mode page
6332 * pointer to mode page / NULL on failure
/* Walks the mode pages that follow the header and block descriptors,
 * returning the first page whose code matches and whose length is at
 * least the caller's minimum.
 * NOTE(review): the while-loop condition over "length" and the final
 * return NULL are elided in this extract. */
6334 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6335 u32 page_code, u32 len)
6337 struct ipr_mode_page_hdr *mode_hdr;
6341 if (!mode_pages || (mode_pages->hdr.length == 0))
/* Remaining data = (reported length + 1) minus the 4-byte header
 * remainder and the block descriptors. */
6344 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6345 mode_hdr = (struct ipr_mode_page_hdr *)
6346 (mode_pages->data + mode_pages->hdr.block_desc_len);
6349 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6350 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
/* Not a match: skip over this page (header + payload). */
6354 page_length = (sizeof(struct ipr_mode_page_hdr) +
6355 mode_hdr->page_length);
6356 length -= page_length;
6357 mode_hdr = (struct ipr_mode_page_hdr *)
6358 ((unsigned long)mode_hdr + page_length);
6365 * ipr_check_term_power - Check for term power errors
6366 * @ioa_cfg: ioa config struct
6367 * @mode_pages: IOAFP mode pages buffer
6369 * Check the IOAFP's mode page 28 for term power errors
/* Logs a dev_err for every bus entry flagged as missing termination
 * power; purely diagnostic, no state is changed. */
6374 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6375 struct ipr_mode_pages *mode_pages)
6379 struct ipr_dev_bus_entry *bus;
6380 struct ipr_mode_page28 *mode_page;
6382 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6383 sizeof(struct ipr_mode_page28));
6385 entry_length = mode_page->entry_length;
6387 bus = mode_page->bus;
6389 for (i = 0; i < mode_page->num_entries; i++) {
6390 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6391 dev_err(&ioa_cfg->pdev->dev,
6392 "Term power is absent on scsi bus %d\n",
/* Entries are variable-sized; advance by the adapter-reported
 * entry_length rather than sizeof(*bus). */
6396 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6401 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6402 * @ioa_cfg: ioa config struct
6404 * Looks through the config table checking for SES devices. If
6405 * the SES device is in the SES table indicating a maximum SCSI
6406 * bus speed, the speed is limited for the bus.
/* Clamps each bus's max_xfer_rate down to the SES-imposed maximum,
 * never raising it. */
6411 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6416 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6417 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6418 ioa_cfg->bus_attr[i].bus_width);
6420 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6421 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6426 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6427 * @ioa_cfg: ioa config struct
6428 * @mode_pages: mode page 28 buffer
6430 * Updates mode page 28 based on driver configuration
/* Pushes the driver's per-bus attributes (width, transfer rate, QAS)
 * into the sensed page-28 buffer before it is mode-selected back. */
6435 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6436 struct ipr_mode_pages *mode_pages)
6438 int i, entry_length;
6439 struct ipr_dev_bus_entry *bus;
6440 struct ipr_bus_attributes *bus_attr;
6441 struct ipr_mode_page28 *mode_page;
6443 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6444 sizeof(struct ipr_mode_page28));
6446 entry_length = mode_page->entry_length;
6448 /* Loop for each device bus entry */
6449 for (i = 0, bus = mode_page->bus;
6450 i < mode_page->num_entries;
6451 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6452 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6453 dev_err(&ioa_cfg->pdev->dev,
6454 "Invalid resource address reported: 0x%08X\n",
6455 IPR_GET_PHYS_LOC(bus->res_addr));
/* NOTE(review): bus_attr is indexed by loop position i, not by
 * bus->res_addr.bus -- assumes entries arrive in bus order; confirm
 * against adapter firmware behavior. */
6459 bus_attr = &ioa_cfg->bus_attr[i];
6460 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6461 bus->bus_width = bus_attr->bus_width;
6462 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6463 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6464 if (bus_attr->qas_enabled)
6465 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6467 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6472 * ipr_build_mode_select - Build a mode select command
6473 * @ipr_cmd: ipr command struct
6474 * @res_handle: resource handle to send command to
6475 * @parm: Byte 2 of Mode Sense command
6476 * @dma_addr: DMA buffer address
6477 * @xfer_len: data transfer length
/* Fills the IOARCB with a 6-byte MODE SELECT CDB and attaches the
 * supplied DMA buffer as a single write IOADL entry; does not issue. */
6482 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6483 __be32 res_handle, u8 parm,
6484 dma_addr_t dma_addr, u8 xfer_len)
6486 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6488 ioarcb->res_handle = res_handle;
6489 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6490 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6491 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6492 ioarcb->cmd_pkt.cdb[1] = parm;
6493 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6495 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6499 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6500 * @ipr_cmd: ipr command struct
6502 * This function sets up the SCSI bus attributes and sends
6503 * a Mode Select for Page 28 to activate them.
6508 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6510 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6511 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
/* Apply driver policy to the freshly-sensed pages before writing
 * them back. */
6515 ipr_scsi_bus_speed_limit(ioa_cfg);
6516 ipr_check_term_power(ioa_cfg, mode_pages);
6517 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
/* MODE SELECT requires the mode-data-length header byte be zero; save
 * the real length (+1 for the length byte itself) for the CDB. */
6518 length = mode_pages->hdr.length + 1;
6519 mode_pages->hdr.length = 0;
6521 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6522 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6525 ipr_cmd->job_step = ipr_setup_write_cache;
6526 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6529 return IPR_RC_JOB_RETURN;
6533 * ipr_build_mode_sense - Builds a mode sense command
6534 * @ipr_cmd: ipr command struct
6535 * @res: resource entry struct
6536 * @parm: Byte 2 of mode sense command
6537 * @dma_addr: DMA address of mode sense buffer
6538 * @xfer_len: Size of DMA buffer
/* Counterpart of ipr_build_mode_select: 6-byte MODE SENSE CDB with a
 * single read IOADL entry; does not issue the command. */
6543 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6545 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6547 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6549 ioarcb->res_handle = res_handle;
6550 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6551 ioarcb->cmd_pkt.cdb[2] = parm;
6552 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6553 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6555 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6559 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6560 * @ipr_cmd: ipr command struct
6562 * This function handles the failure of an IOA bringup command.
/* Default job_step_failed handler: log the failing CDB opcode and
 * IOASC, restart the adapter reset, and free the command block. */
6567 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6569 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6570 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6572 dev_err(&ioa_cfg->pdev->dev,
6573 "0x%02X failed with IOASC: 0x%08X\n",
6574 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6576 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6577 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6578 return IPR_RC_JOB_RETURN;
6582 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6583 * @ipr_cmd: ipr command struct
6585 * This function handles the failure of a Mode Sense to the IOAFP.
6586 * Some adapters do not handle all mode pages.
6589 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/* "Invalid request" means the adapter simply lacks the page: skip the
 * mode select and continue the bringup; anything else is a real
 * failure and goes through ipr_reset_cmd_failed. */
6591 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6593 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6595 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6596 ipr_cmd->job_step = ipr_setup_write_cache;
6597 return IPR_RC_JOB_CONTINUE;
6600 return ipr_reset_cmd_failed(ipr_cmd);
6604 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6605 * @ipr_cmd: ipr command struct
6607 * This function send a Page 28 mode sense to the IOA to
6608 * retrieve SCSI bus attributes.
6613 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6615 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6618 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6619 0x28, ioa_cfg->vpd_cbs_dma +
6620 offsetof(struct ipr_misc_cbs, mode_pages),
6621 sizeof(struct ipr_mode_pages))
/* On success proceed to the page-28 mode select; a tolerable failure
 * (page unsupported) is routed through the mode-sense failure handler. */;
6623 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6624 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6626 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6629 return IPR_RC_JOB_RETURN;
6633 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6634 * @ipr_cmd: ipr command struct
6636 * This function enables dual IOA RAID support if possible.
6641 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6643 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6644 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6645 struct ipr_mode_page24 *mode_page;
6649 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6650 sizeof(struct ipr_mode_page24));
/* Set the dual-IOA advanced-function bit in the sensed page, then
 * write the page back. */
6653 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
/* As with page 28: zero the mode-data-length byte for MODE SELECT and
 * keep the real length for the CDB. */
6655 length = mode_pages->hdr.length + 1;
6656 mode_pages->hdr.length = 0;
6658 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6659 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6662 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6663 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6666 return IPR_RC_JOB_RETURN;
6670 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6671 * @ipr_cmd: ipr command struct
6673 * This function handles the failure of a Mode Sense to the IOAFP.
6674 * Some adapters do not handle all mode pages.
6677 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/* Page 0x24 is optional: an "invalid request" IOASC just skips ahead to
 * the page-28 mode sense instead of failing the bringup. */
6679 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6681 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6683 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6684 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6685 return IPR_RC_JOB_CONTINUE;
6688 return ipr_reset_cmd_failed(ipr_cmd);
6692 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6693 * @ipr_cmd: ipr command struct
6695 * This function send a mode sense to the IOA to retrieve
6696 * the IOA Advanced Function Control mode page.
6701 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6703 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6706 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6707 0x24, ioa_cfg->vpd_cbs_dma +
6708 offsetof(struct ipr_misc_cbs, mode_pages),
6709 sizeof(struct ipr_mode_pages));
/* Success leads to the page-24 mode select; page-unsupported errors
 * are tolerated by the dedicated failure handler. */
6711 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6712 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6714 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6717 return IPR_RC_JOB_RETURN;
6721 * ipr_init_res_table - Initialize the resource table
6722 * @ipr_cmd: ipr command struct
6724 * This function looks through the existing resource table, comparing
6725 * it with the config table. This function will take care of old/new
6726 * devices and schedule adding/removing them from the mid-layer
6730 * IPR_RC_JOB_CONTINUE
/* NOTE(review): this extract elides several lines (old_res list
 * declaration/init, "found" handling, loop closers); the reconcile
 * algorithm is: move all used resources to a temporary list, re-match
 * them against the freshly-read config table, then anything left over
 * is gone from the adapter and gets deleted from the mid-layer. */
6732 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6734 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6735 struct ipr_resource_entry *res, *temp;
6736 struct ipr_config_table_entry_wrapper cfgtew;
6737 int entries, found, flag, i;
/* sis64 adapters report a 64-bit config table layout. */
6742 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6744 flag = ioa_cfg->u.cfg_table->hdr.flags;
6746 if (flag & IPR_UCODE_DOWNLOAD_REQ)
6747 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Park every currently-known resource on old_res for re-matching. */
6749 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6750 list_move_tail(&res->queue, &old_res);
6753 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6755 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6757 for (i = 0; i < entries; i++) {
6759 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6761 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
/* First try to match this config entry to a pre-reset resource. */
6764 list_for_each_entry_safe(res, temp, &old_res, queue) {
6765 if (ipr_is_same_device(res, &cfgtew)) {
6766 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
/* No match: take a fresh entry from the free list, if any remain. */
6773 if (list_empty(&ioa_cfg->free_res_q)) {
6774 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6779 res = list_entry(ioa_cfg->free_res_q.next,
6780 struct ipr_resource_entry, queue);
6781 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6782 ipr_init_res_entry(res, &cfgtew);
6787 ipr_update_res_entry(res, &cfgtew);
/* Resources still on old_res no longer exist on the adapter: flag for
 * mid-layer removal (or recycle directly when not exposed). */
6790 list_for_each_entry_safe(res, temp, &old_res, queue) {
6792 res->del_from_ml = 1;
6793 res->res_handle = IPR_INVALID_RES_HANDLE;
6794 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6798 list_for_each_entry_safe(res, temp, &old_res, queue) {
6799 ipr_clear_res_target(res);
6800 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
/* Dual-IOA RAID capable and enabled: take the page-24 detour first. */
6803 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6804 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6806 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6809 return IPR_RC_JOB_CONTINUE;
6813 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6814 * @ipr_cmd: ipr command struct
6816 * This function sends a Query IOA Configuration command
6817 * to the adapter to retrieve the IOA configuration table.
6822 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6824 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6825 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6826 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6827 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
/* Latch the dual-IOA RAID capability learned from the 0xD0 inquiry. */
6830 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6831 ioa_cfg->dual_raid = 1;
6832 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6833 ucode_vpd->major_release, ucode_vpd->card_type,
6834 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6835 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6836 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6838 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
/* CDB bytes 7..8 carry the big-endian config-table buffer size. */
6839 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6840 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6842 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6843 IPR_IOADL_FLAGS_READ_LAST);
6845 ipr_cmd->job_step = ipr_init_res_table;
6847 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6850 return IPR_RC_JOB_RETURN;
6854 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6855 * @ipr_cmd: ipr command struct
6857 * This utility function sends an inquiry to the adapter.
/* Common helper for the bringup inquiry steps: builds a 6-byte INQUIRY
 * CDB (flags selects EVPD, page selects the VPD page), attaches the DMA
 * buffer as a read IOADL, and issues via the reset job engine. */
6862 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6863 dma_addr_t dma_addr, u8 xfer_len)
6865 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6868 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6869 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6871 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6872 ioarcb->cmd_pkt.cdb[1] = flags;
6873 ioarcb->cmd_pkt.cdb[2] = page;
6874 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6876 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6878 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6883 * ipr_inquiry_page_supported - Is the given inquiry page supported
6884 * @page0: inquiry page 0 buffer
6887 * This function determines if the specified inquiry page is supported.
6890 * 1 if page is supported / 0 if not
/* Linear scan of the page-0 supported-pages list, bounded by both the
 * device-reported length and the driver's buffer capacity. */
6892 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6896 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6897 if (page0->page[i] == page)
6904 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6905 * @ipr_cmd: ipr command struct
6907 * This function sends a Page 0xD0 inquiry to the adapter
6908 * to retrieve adapter capabilities.
6911 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6913 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6915 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6916 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6917 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6920 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
/* Clear cap so stale capability bits never survive an adapter that
 * doesn't support page 0xD0. */
6921 memset(cap, 0, sizeof(*cap));
6923 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6924 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6925 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6926 sizeof(struct ipr_inquiry_cap));
6927 return IPR_RC_JOB_RETURN;
/* Page not supported: skip straight to the next job step. */
6931 return IPR_RC_JOB_CONTINUE;
6935 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6936 * @ipr_cmd: ipr command struct
6938 * This function sends a Page 3 inquiry to the adapter
6939 * to retrieve software VPD information.
6942 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6944 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6946 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6947 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
/* No VPD page 1 implies an adapter with no write cache to manage. */
6951 if (!ipr_inquiry_page_supported(page0, 1))
6952 ioa_cfg->cache_state = CACHE_NONE;
6954 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6956 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6957 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6958 sizeof(struct ipr_inquiry_page3));
6961 return IPR_RC_JOB_RETURN;
6965 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6966 * @ipr_cmd: ipr command struct
6968 * This function sends a Page 0 inquiry to the adapter
6969 * to retrieve supported inquiry pages.
6972 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6974 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6976 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6981 /* Grab the type out of the VPD and store it away */
/* The first 4 product-id characters encode the adapter type as hex
 * (e.g. "5702"); parsed for the pSeries blacklist check. */
6982 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6984 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6986 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6988 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6989 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6990 sizeof(struct ipr_inquiry_page0));
6993 return IPR_RC_JOB_RETURN;
6997 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6998 * @ipr_cmd: ipr command struct
7000 * This function sends a standard inquiry to the adapter.
/* First of the inquiry chain: standard (non-EVPD) inquiry into
 * ioa_vpd, then hand off to the page-0 step. */
7005 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7007 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7010 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7012 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7013 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7014 sizeof(struct ipr_ioa_vpd));
7017 return IPR_RC_JOB_RETURN;
7021 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
7022 * @ipr_cmd: ipr command struct
7024 * This function send an Identify Host Request Response Queue
7025 * command to establish the HRRQ with the adapter.
/* NOTE(review): the function name's "indentify" misspelling is the
 * historical identifier -- do not rename without updating callers. */
7030 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
7032 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7033 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7036 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7038 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7039 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7041 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* CDB bytes 2..5 carry the 32-bit HRRQ DMA address, big-endian. */
7042 ioarcb->cmd_pkt.cdb[2] =
7043 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7044 ioarcb->cmd_pkt.cdb[3] =
7045 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7046 ioarcb->cmd_pkt.cdb[4] =
7047 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7048 ioarcb->cmd_pkt.cdb[5] =
7049 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
/* CDB bytes 7..8 carry the queue size in bytes (one u32 per command
 * block), big-endian. */
7050 ioarcb->cmd_pkt.cdb[7] =
7051 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7052 ioarcb->cmd_pkt.cdb[8] =
7053 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7055 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7057 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7060 return IPR_RC_JOB_RETURN;
7064 * ipr_reset_timer_done - Adapter reset timer function
7065 * @ipr_cmd: ipr command struct
7067 * Description: This function is used in adapter reset processing
7068 * for timing events. If the reset_cmd pointer in the IOA
7069 * config struct is not this adapter's we are doing nested
7070 * resets and fail_all_ops will take care of freeing the
7076 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7078 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7079 unsigned long lock_flags = 0;
/* Timer callback context: take the host lock before touching the
 * command queues. */
7081 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only complete the command if it is still the active reset command;
 * otherwise a nested reset owns cleanup. */
7083 if (ioa_cfg->reset_cmd == ipr_cmd) {
7084 list_del(&ipr_cmd->queue);
7085 ipr_cmd->done(ipr_cmd);
7088 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7092 * ipr_reset_start_timer - Start a timer for adapter reset job
7093 * @ipr_cmd: ipr command struct
7094 * @timeout: timeout value
7096 * Description: This function is used in adapter reset processing
7097 * for timing events. If the reset_cmd pointer in the IOA
7098 * config struct is not this adapter's we are doing nested
7099 * resets and fail_all_ops will take care of freeing the
/* Parks the command on pending_q and arms its timer so the reset job
 * resumes (via ipr_reset_timer_done -> ipr_reset_ioa_job) after
 * "timeout" jiffies. */
7105 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7106 unsigned long timeout)
7108 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7109 ipr_cmd->done = ipr_reset_ioa_job;
7111 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7112 ipr_cmd->timer.expires = jiffies + timeout;
7113 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7114 add_timer(&ipr_cmd->timer);
7118 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7119 * @ioa_cfg: ioa cfg struct
/* Resets the host request/response queue (start/end/current pointers
 * and toggle bit) and clears the config table ahead of (re)enabling
 * the adapter. */
7124 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7126 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7128 /* Initialize Host RRQ pointers */
7129 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7130 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7131 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7132 ioa_cfg->toggle_bit = 1;
7134 /* Zero out config table */
7135 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7139 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7140 * @ipr_cmd: ipr command struct
7142 * This function reinitializes some control blocks and
7143 * enables destructive diagnostics on the adapter.
7148 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7150 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7151 volatile u32 int_reg;
7154 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
7155 ipr_init_ioa_mem(ioa_cfg);
7157 ioa_cfg->allow_interrupts = 1;
7158 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Already transitioned to operational: just unmask error/HRRQ
 * interrupts and continue directly to the identify-HRRQ step. */
7160 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7161 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7162 ioa_cfg->regs.clr_interrupt_mask_reg);
/* Read back to flush the posted MMIO write. */
7163 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7164 return IPR_RC_JOB_CONTINUE;
7167 /* Enable destructive diagnostics on IOA */
7168 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
7170 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
7171 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7173 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
/* Arm the operational-transition timeout; completion arrives via
 * interrupt, with ipr_oper_timeout firing if the adapter never
 * becomes operational. */
7175 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7176 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7177 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7178 ipr_cmd->done = ipr_reset_ioa_job;
7179 add_timer(&ipr_cmd->timer);
7180 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7183 return IPR_RC_JOB_RETURN;
7187 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7188 * @ipr_cmd: ipr command struct
7190 * This function is invoked when an adapter dump has run out
7191 * of processing time.
7194 * IPR_RC_JOB_CONTINUE
7196 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7198 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7200 if (ioa_cfg->sdt_state == GET_DUMP)
7201 ioa_cfg->sdt_state = ABORT_DUMP;
7203 ipr_cmd->job_step = ipr_reset_alert;
7205 return IPR_RC_JOB_CONTINUE;
7209 * ipr_unit_check_no_data - Log a unit check/no data error log
7210 * @ioa_cfg: ioa config struct
7212 * Logs an error indicating the adapter unit checked, but for some
7213 * reason, we were unable to fetch the unit check buffer.
7218 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7220 ioa_cfg->errors_logged++;
7221 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7225 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7226 * @ioa_cfg: ioa config struct
7228 * Fetches the unit check buffer from the adapter by clocking the data
7229 * through the mailbox register.
7234 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7236 unsigned long mailbox;
7237 struct ipr_hostrcb *hostrcb;
7238 struct ipr_uc_sdt sdt;
7242 mailbox = readl(ioa_cfg->ioa_mailbox);
7244 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7245 ipr_unit_check_no_data(ioa_cfg);
7249 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7250 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7251 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7253 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7254 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7255 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7256 ipr_unit_check_no_data(ioa_cfg);
7260 /* Find length of the first sdt entry (UC buffer) */
7261 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7262 length = be32_to_cpu(sdt.entry[0].end_token);
7264 length = (be32_to_cpu(sdt.entry[0].end_token) -
7265 be32_to_cpu(sdt.entry[0].start_token)) &
7266 IPR_FMT2_MBX_ADDR_MASK;
7268 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7269 struct ipr_hostrcb, queue);
7270 list_del(&hostrcb->queue);
7271 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7273 rc = ipr_get_ldump_data_section(ioa_cfg,
7274 be32_to_cpu(sdt.entry[0].start_token),
7275 (__be32 *)&hostrcb->hcam,
7276 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7279 ipr_handle_log_data(ioa_cfg, hostrcb);
7280 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7281 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7282 ioa_cfg->sdt_state == GET_DUMP)
7283 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7285 ipr_unit_check_no_data(ioa_cfg);
7287 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7291 * ipr_reset_restore_cfg_space - Restore PCI config space.
7292 * @ipr_cmd: ipr command struct
7294 * Description: This function restores the saved PCI config space of
7295 * the adapter, fails all outstanding ops back to the callers, and
7296 * fetches the dump/unit check if applicable to this reset.
7299 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7301 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7303 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7307 ioa_cfg->pdev->state_saved = true;
7308 rc = pci_restore_state(ioa_cfg->pdev);
7310 if (rc != PCIBIOS_SUCCESSFUL) {
7311 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7312 return IPR_RC_JOB_CONTINUE;
7315 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7316 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7317 return IPR_RC_JOB_CONTINUE;
7320 ipr_fail_all_ops(ioa_cfg);
7322 if (ioa_cfg->ioa_unit_checked) {
7323 ioa_cfg->ioa_unit_checked = 0;
7324 ipr_get_unit_check_buffer(ioa_cfg);
7325 ipr_cmd->job_step = ipr_reset_alert;
7326 ipr_reset_start_timer(ipr_cmd, 0);
7327 return IPR_RC_JOB_RETURN;
7330 if (ioa_cfg->in_ioa_bringdown) {
7331 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7333 ipr_cmd->job_step = ipr_reset_enable_ioa;
7335 if (GET_DUMP == ioa_cfg->sdt_state) {
7336 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7337 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7338 schedule_work(&ioa_cfg->work_q);
7339 return IPR_RC_JOB_RETURN;
7344 return IPR_RC_JOB_CONTINUE;
7348 * ipr_reset_bist_done - BIST has completed on the adapter.
7349 * @ipr_cmd: ipr command struct
7351 * Description: Unblock config space and resume the reset process.
7354 * IPR_RC_JOB_CONTINUE
7356 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7359 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7360 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7362 return IPR_RC_JOB_CONTINUE;
7366 * ipr_reset_start_bist - Run BIST on the adapter.
7367 * @ipr_cmd: ipr command struct
7369 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7372 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7374 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7376 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7380 pci_block_user_cfg_access(ioa_cfg->pdev);
7381 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7383 if (rc != PCIBIOS_SUCCESSFUL) {
7384 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7385 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7386 rc = IPR_RC_JOB_CONTINUE;
7388 ipr_cmd->job_step = ipr_reset_bist_done;
7389 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7390 rc = IPR_RC_JOB_RETURN;
7398 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7399 * @ipr_cmd: ipr command struct
7401 * Description: This clears PCI reset to the adapter and delays two seconds.
7406 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7409 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7410 ipr_cmd->job_step = ipr_reset_bist_done;
7411 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7413 return IPR_RC_JOB_RETURN;
7417 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7418 * @ipr_cmd: ipr command struct
7420 * Description: This asserts PCI reset to the adapter.
7425 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7427 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7428 struct pci_dev *pdev = ioa_cfg->pdev;
7431 pci_block_user_cfg_access(pdev);
7432 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7433 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7434 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7436 return IPR_RC_JOB_RETURN;
7440 * ipr_reset_allowed - Query whether or not IOA can be reset
7441 * @ioa_cfg: ioa config struct
7444 * 0 if reset not allowed / non-zero if reset is allowed
7446 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7448 volatile u32 temp_reg;
7450 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7451 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7455 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7456 * @ipr_cmd: ipr command struct
7458 * Description: This function waits for adapter permission to run BIST,
7459 * then runs BIST. If the adapter does not give permission after a
7460 * reasonable time, we will reset the adapter anyway. The impact of
7461 * resetting the adapter without warning the adapter is the risk of
7462 * losing the persistent error log on the adapter. If the adapter is
7463 * reset while it is writing to the flash on the adapter, the flash
7464 * segment will have bad ECC and be zeroed.
7467 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7469 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7471 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7472 int rc = IPR_RC_JOB_RETURN;
7474 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7475 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7476 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7478 ipr_cmd->job_step = ioa_cfg->reset;
7479 rc = IPR_RC_JOB_CONTINUE;
7486 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
7487 * @ipr_cmd: ipr command struct
7489 * Description: This function alerts the adapter that it will be reset.
7490 * If memory space is not currently enabled, proceed directly
7491 * to running BIST on the adapter. The timer must always be started
7492 * so we guarantee we do not run BIST from ipr_isr.
7497 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7499 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7504 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7506 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7507 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7508 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
7509 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7511 ipr_cmd->job_step = ioa_cfg->reset;
7514 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7515 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7518 return IPR_RC_JOB_RETURN;
7522 * ipr_reset_ucode_download_done - Microcode download completion
7523 * @ipr_cmd: ipr command struct
7525 * Description: This function unmaps the microcode download buffer.
7528 * IPR_RC_JOB_CONTINUE
7530 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7532 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7533 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7535 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7536 sglist->num_sg, DMA_TO_DEVICE);
7538 ipr_cmd->job_step = ipr_reset_alert;
7539 return IPR_RC_JOB_CONTINUE;
7543 * ipr_reset_ucode_download - Download microcode to the adapter
7544 * @ipr_cmd: ipr command struct
7546 * Description: This function checks to see if it there is microcode
7547 * to download to the adapter. If there is, a download is performed.
7550 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7552 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7554 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7555 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7558 ipr_cmd->job_step = ipr_reset_alert;
7561 return IPR_RC_JOB_CONTINUE;
7563 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7564 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7565 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7566 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7567 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7568 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7569 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7572 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7574 ipr_build_ucode_ioadl(ipr_cmd, sglist);
7575 ipr_cmd->job_step = ipr_reset_ucode_download_done;
7577 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7578 IPR_WRITE_BUFFER_TIMEOUT);
7581 return IPR_RC_JOB_RETURN;
7585 * ipr_reset_shutdown_ioa - Shutdown the adapter
7586 * @ipr_cmd: ipr command struct
7588 * Description: This function issues an adapter shutdown of the
7589 * specified type to the specified adapter as part of the
7590 * adapter reset job.
7593 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7595 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7597 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7598 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7599 unsigned long timeout;
7600 int rc = IPR_RC_JOB_CONTINUE;
7603 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7604 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7605 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7606 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7607 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7609 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7610 timeout = IPR_SHUTDOWN_TIMEOUT;
7611 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7612 timeout = IPR_INTERNAL_TIMEOUT;
7613 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7614 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7616 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7618 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7620 rc = IPR_RC_JOB_RETURN;
7621 ipr_cmd->job_step = ipr_reset_ucode_download;
7623 ipr_cmd->job_step = ipr_reset_alert;
7630 * ipr_reset_ioa_job - Adapter reset job
7631 * @ipr_cmd: ipr command struct
7633 * Description: This function is the job router for the adapter reset job.
7638 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7641 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7644 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
7646 if (ioa_cfg->reset_cmd != ipr_cmd) {
7648 * We are doing nested adapter resets and this is
7649 * not the current reset job.
7651 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7655 if (IPR_IOASC_SENSE_KEY(ioasc)) {
7656 rc = ipr_cmd->job_step_failed(ipr_cmd);
7657 if (rc == IPR_RC_JOB_RETURN)
7661 ipr_reinit_ipr_cmnd(ipr_cmd);
7662 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7663 rc = ipr_cmd->job_step(ipr_cmd);
7664 } while(rc == IPR_RC_JOB_CONTINUE);
7668 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7669 * @ioa_cfg: ioa config struct
7670 * @job_step: first job step of reset job
7671 * @shutdown_type: shutdown type
7673 * Description: This function will initiate the reset of the given adapter
7674 * starting at the selected job step.
7675 * If the caller needs to wait on the completion of the reset,
7676 * the caller must sleep on the reset_wait_q.
7681 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7682 int (*job_step) (struct ipr_cmnd *),
7683 enum ipr_shutdown_type shutdown_type)
7685 struct ipr_cmnd *ipr_cmd;
7687 ioa_cfg->in_reset_reload = 1;
7688 ioa_cfg->allow_cmds = 0;
7689 scsi_block_requests(ioa_cfg->host);
7691 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7692 ioa_cfg->reset_cmd = ipr_cmd;
7693 ipr_cmd->job_step = job_step;
7694 ipr_cmd->u.shutdown_type = shutdown_type;
7696 ipr_reset_ioa_job(ipr_cmd);
7700 * ipr_initiate_ioa_reset - Initiate an adapter reset
7701 * @ioa_cfg: ioa config struct
7702 * @shutdown_type: shutdown type
7704 * Description: This function will initiate the reset of the given adapter.
7705 * If the caller needs to wait on the completion of the reset,
7706 * the caller must sleep on the reset_wait_q.
7711 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7712 enum ipr_shutdown_type shutdown_type)
7714 if (ioa_cfg->ioa_is_dead)
7717 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7718 ioa_cfg->sdt_state = ABORT_DUMP;
7720 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7721 dev_err(&ioa_cfg->pdev->dev,
7722 "IOA taken offline - error recovery failed\n");
7724 ioa_cfg->reset_retries = 0;
7725 ioa_cfg->ioa_is_dead = 1;
7727 if (ioa_cfg->in_ioa_bringdown) {
7728 ioa_cfg->reset_cmd = NULL;
7729 ioa_cfg->in_reset_reload = 0;
7730 ipr_fail_all_ops(ioa_cfg);
7731 wake_up_all(&ioa_cfg->reset_wait_q);
7733 spin_unlock_irq(ioa_cfg->host->host_lock);
7734 scsi_unblock_requests(ioa_cfg->host);
7735 spin_lock_irq(ioa_cfg->host->host_lock);
7738 ioa_cfg->in_ioa_bringdown = 1;
7739 shutdown_type = IPR_SHUTDOWN_NONE;
7743 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7748 * ipr_reset_freeze - Hold off all I/O activity
7749 * @ipr_cmd: ipr command struct
7751 * Description: If the PCI slot is frozen, hold off all I/O
7752 * activity; then, as soon as the slot is available again,
7753 * initiate an adapter reset.
7755 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7757 /* Disallow new interrupts, avoid loop */
7758 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7759 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7760 ipr_cmd->done = ipr_reset_ioa_job;
7761 return IPR_RC_JOB_RETURN;
7765 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7766 * @pdev: PCI device struct
7768 * Description: This routine is called to tell us that the PCI bus
7769 * is down. Can't do anything here, except put the device driver
7770 * into a holding pattern, waiting for the PCI bus to come back.
7772 static void ipr_pci_frozen(struct pci_dev *pdev)
7774 unsigned long flags = 0;
7775 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7777 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7778 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7779 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7783 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7784 * @pdev: PCI device struct
7786 * Description: This routine is called by the pci error recovery
7787 * code after the PCI slot has been reset, just before we
7788 * should resume normal operations.
7790 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7792 unsigned long flags = 0;
7793 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7795 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7796 if (ioa_cfg->needs_warm_reset)
7797 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7799 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7801 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7802 return PCI_ERS_RESULT_RECOVERED;
7806 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7807 * @pdev: PCI device struct
7809 * Description: This routine is called when the PCI bus has
7810 * permanently failed.
7812 static void ipr_pci_perm_failure(struct pci_dev *pdev)
7814 unsigned long flags = 0;
7815 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7817 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7818 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7819 ioa_cfg->sdt_state = ABORT_DUMP;
7820 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7821 ioa_cfg->in_ioa_bringdown = 1;
7822 ioa_cfg->allow_cmds = 0;
7823 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7824 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7828 * ipr_pci_error_detected - Called when a PCI error is detected.
7829 * @pdev: PCI device struct
7830 * @state: PCI channel state
7832 * Description: Called when a PCI error is detected.
7835 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7837 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7838 pci_channel_state_t state)
7841 case pci_channel_io_frozen:
7842 ipr_pci_frozen(pdev);
7843 return PCI_ERS_RESULT_NEED_RESET;
7844 case pci_channel_io_perm_failure:
7845 ipr_pci_perm_failure(pdev);
7846 return PCI_ERS_RESULT_DISCONNECT;
7851 return PCI_ERS_RESULT_NEED_RESET;
7855 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7856 * @ioa_cfg: ioa cfg struct
7858 * Description: This is the second phase of adapter intialization
7859 * This function takes care of initilizing the adapter to the point
7860 * where it can accept new commands.
7863 * 0 on success / -EIO on failure
7865 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7868 unsigned long host_lock_flags = 0;
7871 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7872 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
7873 if (ioa_cfg->needs_hard_reset) {
7874 ioa_cfg->needs_hard_reset = 0;
7875 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7877 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7880 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7881 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7882 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7884 if (ioa_cfg->ioa_is_dead) {
7886 } else if (ipr_invalid_adapter(ioa_cfg)) {
7890 dev_err(&ioa_cfg->pdev->dev,
7891 "Adapter not supported in this hardware configuration.\n");
7894 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7901 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7902 * @ioa_cfg: ioa config struct
7907 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7911 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7912 if (ioa_cfg->ipr_cmnd_list[i])
7913 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7914 ioa_cfg->ipr_cmnd_list[i],
7915 ioa_cfg->ipr_cmnd_list_dma[i]);
7917 ioa_cfg->ipr_cmnd_list[i] = NULL;
7920 if (ioa_cfg->ipr_cmd_pool)
7921 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7923 ioa_cfg->ipr_cmd_pool = NULL;
7927 * ipr_free_mem - Frees memory allocated for an adapter
7928 * @ioa_cfg: ioa cfg struct
7933 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7937 kfree(ioa_cfg->res_entries);
7938 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7939 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7940 ipr_free_cmd_blks(ioa_cfg);
7941 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7942 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7943 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7944 ioa_cfg->u.cfg_table,
7945 ioa_cfg->cfg_table_dma);
7947 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7948 pci_free_consistent(ioa_cfg->pdev,
7949 sizeof(struct ipr_hostrcb),
7950 ioa_cfg->hostrcb[i],
7951 ioa_cfg->hostrcb_dma[i]);
7954 ipr_free_dump(ioa_cfg);
7955 kfree(ioa_cfg->trace);
7959 * ipr_free_all_resources - Free all allocated resources for an adapter.
7960 * @ipr_cmd: ipr command struct
7962 * This function frees all allocated resources for the
7963 * specified adapter.
7968 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7970 struct pci_dev *pdev = ioa_cfg->pdev;
7973 free_irq(pdev->irq, ioa_cfg);
7974 pci_disable_msi(pdev);
7975 iounmap(ioa_cfg->hdw_dma_regs);
7976 pci_release_regions(pdev);
7977 ipr_free_mem(ioa_cfg);
7978 scsi_host_put(ioa_cfg->host);
7979 pci_disable_device(pdev);
7984 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7985 * @ioa_cfg: ioa config struct
7988 * 0 on success / -ENOMEM on allocation failure
7990 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7992 struct ipr_cmnd *ipr_cmd;
7993 struct ipr_ioarcb *ioarcb;
7994 dma_addr_t dma_addr;
7997 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
7998 sizeof(struct ipr_cmnd), 16, 0);
8000 if (!ioa_cfg->ipr_cmd_pool)
8003 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8004 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8007 ipr_free_cmd_blks(ioa_cfg);
8011 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8012 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8013 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8015 ioarcb = &ipr_cmd->ioarcb;
8016 ipr_cmd->dma_addr = dma_addr;
8018 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8020 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8022 ioarcb->host_response_handle = cpu_to_be32(i << 2);
8023 if (ioa_cfg->sis64) {
8024 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8025 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8026 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8027 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8029 ioarcb->write_ioadl_addr =
8030 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8031 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8032 ioarcb->ioasa_host_pci_addr =
8033 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8035 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8036 ipr_cmd->cmd_index = i;
8037 ipr_cmd->ioa_cfg = ioa_cfg;
8038 ipr_cmd->sense_buffer_dma = dma_addr +
8039 offsetof(struct ipr_cmnd, sense_buffer);
8041 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8048 * ipr_alloc_mem - Allocate memory for an adapter
8049 * @ioa_cfg: ioa config struct
8052 * 0 on success / non-zero for error
8054 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8056 struct pci_dev *pdev = ioa_cfg->pdev;
8057 int i, rc = -ENOMEM;
8060 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8061 ioa_cfg->max_devs_supported, GFP_KERNEL);
8063 if (!ioa_cfg->res_entries)
8066 if (ioa_cfg->sis64) {
8067 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8068 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8069 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8070 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8071 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8072 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8075 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8076 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8077 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8080 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8081 sizeof(struct ipr_misc_cbs),
8082 &ioa_cfg->vpd_cbs_dma);
8084 if (!ioa_cfg->vpd_cbs)
8085 goto out_free_res_entries;
8087 if (ipr_alloc_cmd_blks(ioa_cfg))
8088 goto out_free_vpd_cbs;
8090 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8091 sizeof(u32) * IPR_NUM_CMD_BLKS,
8092 &ioa_cfg->host_rrq_dma);
8094 if (!ioa_cfg->host_rrq)
8095 goto out_ipr_free_cmd_blocks;
8097 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8098 ioa_cfg->cfg_table_size,
8099 &ioa_cfg->cfg_table_dma);
8101 if (!ioa_cfg->u.cfg_table)
8102 goto out_free_host_rrq;
8104 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8105 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8106 sizeof(struct ipr_hostrcb),
8107 &ioa_cfg->hostrcb_dma[i]);
8109 if (!ioa_cfg->hostrcb[i])
8110 goto out_free_hostrcb_dma;
8112 ioa_cfg->hostrcb[i]->hostrcb_dma =
8113 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8114 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8115 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8118 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8119 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8121 if (!ioa_cfg->trace)
8122 goto out_free_hostrcb_dma;
8129 out_free_hostrcb_dma:
8131 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8132 ioa_cfg->hostrcb[i],
8133 ioa_cfg->hostrcb_dma[i]);
8135 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8136 ioa_cfg->u.cfg_table,
8137 ioa_cfg->cfg_table_dma);
8139 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8140 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8141 out_ipr_free_cmd_blocks:
8142 ipr_free_cmd_blks(ioa_cfg);
8144 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8145 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8146 out_free_res_entries:
8147 kfree(ioa_cfg->res_entries);
8152 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8153 * @ioa_cfg: ioa config struct
8158 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8162 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8163 ioa_cfg->bus_attr[i].bus = i;
8164 ioa_cfg->bus_attr[i].qas_enabled = 0;
8165 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8166 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8167 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8169 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8174 * ipr_init_ioa_cfg - Initialize IOA config struct
8175 * @ioa_cfg: ioa config struct
8176 * @host: scsi host struct
8177 * @pdev: PCI dev struct
8182 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8183 struct Scsi_Host *host, struct pci_dev *pdev)
8185 const struct ipr_interrupt_offsets *p;
8186 struct ipr_interrupts *t;
8189 ioa_cfg->host = host;
8190 ioa_cfg->pdev = pdev;
8191 ioa_cfg->log_level = ipr_log_level;
8192 ioa_cfg->doorbell = IPR_DOORBELL;
8193 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8194 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8195 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8196 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8197 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8198 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8199 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8200 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8202 INIT_LIST_HEAD(&ioa_cfg->free_q);
8203 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8204 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8205 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8206 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8207 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8208 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8209 init_waitqueue_head(&ioa_cfg->reset_wait_q);
8210 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8211 ioa_cfg->sdt_state = INACTIVE;
8212 if (ipr_enable_cache)
8213 ioa_cfg->cache_state = CACHE_ENABLED;
8215 ioa_cfg->cache_state = CACHE_DISABLED;
8217 ipr_initialize_bus_attr(ioa_cfg);
8218 ioa_cfg->max_devs_supported = ipr_max_devs;
8220 if (ioa_cfg->sis64) {
8221 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8222 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8223 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8224 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8226 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8227 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8228 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8229 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8231 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8232 host->unique_id = host->host_no;
8233 host->max_cmd_len = IPR_MAX_CDB_LEN;
8234 pci_set_drvdata(pdev, ioa_cfg);
8236 p = &ioa_cfg->chip_cfg->regs;
8238 base = ioa_cfg->hdw_dma_regs;
8240 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8241 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8242 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8243 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8244 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8245 t->ioarrin_reg = base + p->ioarrin_reg;
8246 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8247 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8248 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8250 if (ioa_cfg->sis64) {
8251 t->dump_addr_reg = base + p->dump_addr_reg;
8252 t->dump_data_reg = base + p->dump_data_reg;
8257 * ipr_get_chip_info - Find adapter chip information
8258 * @dev_id: PCI device id struct
8261 * ptr to chip information on success / NULL on failure
8263 static const struct ipr_chip_t * __devinit
8264 ipr_get_chip_info(const struct pci_device_id *dev_id)
8268 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8269 if (ipr_chip[i].vendor == dev_id->vendor &&
8270 ipr_chip[i].device == dev_id->device)
8271 return &ipr_chip[i];
8276 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8277 * @pdev: PCI device struct
8279 * Description: Simply set the msi_received flag to 1 indicating that
8280 * Message Signaled Interrupts are supported.
8283 * 0 on success / non-zero on failure
8285 static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8287 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8288 unsigned long lock_flags = 0;
8289 irqreturn_t rc = IRQ_HANDLED;
8291 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8293 ioa_cfg->msi_received = 1;
8294 wake_up(&ioa_cfg->msi_wait_q);
8296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8301  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8302  * @pdev: PCI device struct
8304  * Description: The return value from pci_enable_msi() can not always be
8305  * trusted. This routine sets up and initiates a test interrupt to determine
8306  * if the interrupt is received via the ipr_test_intr() service routine.
8307  * If the tests fails, the driver will fall back to LSI.
8310  * 0 on success / non-zero on failure
8312 static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8313 struct pci_dev *pdev)
8316 volatile u32 int_reg;
8317 unsigned long lock_flags = 0;
/* Under host_lock: arm the wait queue, mask everything except the
 * transition-to-operational interrupt, then unmask only the IO debug
 * acknowledge interrupt used for this test. */
8321 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8322 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8323 ioa_cfg->msi_received = 0;
8324 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8325 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
/* Read back to flush the posted MMIO write before proceeding. */
8326 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hook the throw-away test handler on the (MSI) vector just enabled. */
8329 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8331 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8333 } else if (ipr_debug)
8334 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
/* Fire the debug-acknowledge interrupt and wait up to 1s (HZ jiffies)
 * for ipr_test_intr() to set msi_received. */
8336 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
8337 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8338 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8339 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8341 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8342 if (!ioa_cfg->msi_received) {
8343 /* MSI test failed */
8344 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8346 } else if (ipr_debug)
8347 dev_info(&pdev->dev, "MSI test succeeded.\n");
8349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Release the test handler; the real ISR is installed later in probe. */
8351 free_irq(pdev->irq, ioa_cfg);
8359  * ipr_probe_ioa - Allocates memory and does first stage of initialization
8360  * @pdev: PCI device struct
8361  * @dev_id: PCI device id struct
8364  * 0 on success / non-zero on failure
8366 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8367 const struct pci_device_id *dev_id)
8369 struct ipr_ioa_cfg *ioa_cfg;
8370 struct Scsi_Host *host;
8371 unsigned long ipr_regs_pci;
8372 void __iomem *ipr_regs;
8373 int rc = PCIBIOS_SUCCESSFUL;
8374 volatile u32 mask, uproc, interrupts;
/* Wake the device on the PCI bus before touching any of its BARs. */
8378 if ((rc = pci_enable_device(pdev))) {
8379 dev_err(&pdev->dev, "Cannot enable adapter\n");
8383 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* The Scsi_Host's hostdata area doubles as the per-adapter ioa_cfg. */
8385 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8388 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8393 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8394 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8395 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8396 sata_port_info.flags, &ipr_sata_ops);
/* Look up the chip-specific register layout; unknown chips are rejected. */
8398 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8400 if (!ioa_cfg->ipr_chip) {
8401 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8402 dev_id->vendor, dev_id->device);
8403 goto out_scsi_host_put;
8406 /* set SIS 32 or SIS 64 */
8407 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8408 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
/* Module parameter overrides the per-device transition-to-op timeout. */
8410 if (ipr_transop_timeout)
8411 ioa_cfg->transop_timeout = ipr_transop_timeout;
8412 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8413 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8415 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8417 ioa_cfg->revid = pdev->revision;
/* Claim and map BAR 0, which holds the adapter's MMIO register block. */
8419 ipr_regs_pci = pci_resource_start(pdev, 0);
8421 rc = pci_request_regions(pdev, IPR_NAME);
8424 "Couldn't register memory range of registers\n");
8425 goto out_scsi_host_put;
8428 ipr_regs = pci_ioremap_bar(pdev, 0);
8432 "Couldn't map memory range of registers\n");
8434 goto out_release_regions;
8437 ioa_cfg->hdw_dma_regs = ipr_regs;
8438 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8439 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8441 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8443 pci_set_master(pdev);
/* SIS64 adapters try a 64-bit DMA mask first, falling back to 32-bit. */
8445 if (ioa_cfg->sis64) {
8446 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8448 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8449 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8453 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8456 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8460 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8461 ioa_cfg->chip_cfg->cache_line_size);
8463 if (rc != PCIBIOS_SUCCESSFUL) {
8464 dev_err(&pdev->dev, "Write of cache line size failed\n");
8469 /* Enable MSI style interrupts if they are supported. */
8470 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
/* pci_enable_msi() success is not trusted; verify with a real test
 * interrupt and fall back to LSI on -EOPNOTSUPP. */
8471 rc = ipr_test_msi(ioa_cfg, pdev);
8472 if (rc == -EOPNOTSUPP)
8473 pci_disable_msi(pdev);
8475 goto out_msi_disable;
8477 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8478 } else if (ipr_debug)
8479 dev_info(&pdev->dev, "Cannot enable MSI.\n");
8481 /* Save away PCI config space for use following IOA reset */
8482 rc = pci_save_state(pdev);
8484 if (rc != PCIBIOS_SUCCESSFUL) {
8485 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8490 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8493 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
/* Config table sizing differs between the SIS64 and SIS32 layouts. */
8497 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8498 + ((sizeof(struct ipr_config_table_entry64)
8499 * ioa_cfg->max_devs_supported)));
8501 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8502 + ((sizeof(struct ipr_config_table_entry)
8503 * ioa_cfg->max_devs_supported)));
8505 rc = ipr_alloc_mem(ioa_cfg);
8508 "Couldn't allocate enough memory for device driver!\n");
8513  * If HRRQ updated interrupt is not masked, or reset alert is set,
8514  * the card is in an unknown state and needs a hard reset
8516 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8517 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
8518 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
8519 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8520 ioa_cfg->needs_hard_reset = 1;
8521 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8522 ioa_cfg->needs_hard_reset = 1;
8523 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8524 ioa_cfg->ioa_unit_checked = 1;
/* Quiesce interrupts, then install the real ISR. The IRQ is only
 * marked shared when MSI is not in use (msi_received == 0). */
8526 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8527 rc = request_irq(pdev->irq, ipr_isr,
8528 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8532 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* Some chips (and Obsidian-E rev 0) need a PCI warm reset instead of BIST. */
8537 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8538 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8539 ioa_cfg->needs_warm_reset = 1;
8540 ioa_cfg->reset = ipr_reset_slot_reset;
8542 ioa_cfg->reset = ipr_reset_start_bist;
/* Publish the adapter on the driver-wide list under the global lock. */
8544 spin_lock(&ipr_driver_lock);
8545 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8546 spin_unlock(&ipr_driver_lock);
/* Error unwind: each label below releases resources in reverse
 * order of acquisition. */
8553 ipr_free_mem(ioa_cfg);
8557 pci_disable_msi(pdev);
8558 out_release_regions:
8559 pci_release_regions(pdev);
8561 scsi_host_put(host);
8563 pci_disable_device(pdev);
8568  * ipr_scan_vsets - Scans for VSET devices
8569  * @ioa_cfg: ioa config struct
8571  * Description: Since the VSET resources do not follow SAM in that we can have
8572  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8577 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
/* Exhaustively probe every target/LUN pair on the VSET bus, because the
 * SCSI midlayer's normal scan would stop at a missing LUN 0. */
8581 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8582 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8583 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8587  * ipr_initiate_ioa_bringdown - Bring down an adapter
8588  * @ioa_cfg: ioa config struct
8589  * @shutdown_type: shutdown type
8591  * Description: This function will initiate bringing down the adapter.
8592  * This consists of issuing an IOA shutdown to the adapter
8593  * to flush the cache, and running BIST.
8594  * If the caller needs to wait on the completion of the reset,
8595  * the caller must sleep on the reset_wait_q.
8600 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8601 enum ipr_shutdown_type shutdown_type)
/* A dump waiting to be taken cannot survive a bringdown; abort it. */
8604 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8605 ioa_cfg->sdt_state = ABORT_DUMP;
8606 ioa_cfg->reset_retries = 0;
8607 ioa_cfg->in_ioa_bringdown = 1;
8608 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8613  * __ipr_remove - Remove a single adapter
8614  * @pdev: pci device struct
8616  * Adapter hot plug remove entry point.
8621 static void __ipr_remove(struct pci_dev *pdev)
8623 unsigned long host_lock_flags = 0;
8624 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Drain any in-flight reset/reload before starting the bringdown;
 * the lock is dropped while sleeping on reset_wait_q. */
8627 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8628 while(ioa_cfg->in_reset_reload) {
8629 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8630 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8631 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8634 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
/* Wait for the bringdown reset to finish, then flush queued work
 * items before tearing structures down. */
8636 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8637 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8638 flush_scheduled_work();
8639 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Unlink from the driver-wide adapter list under the global lock. */
8641 spin_lock(&ipr_driver_lock);
8642 list_del(&ioa_cfg->queue);
8643 spin_unlock(&ipr_driver_lock);
/* NOTE(review): restores an aborted dump state to WAIT_FOR_DUMP --
 * presumably so a pending dump read can still complete; verify. */
8645 if (ioa_cfg->sdt_state == ABORT_DUMP)
8646 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8647 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8649 ipr_free_all_resources(ioa_cfg);
8655  * ipr_remove - IOA hot plug remove entry point
8656  * @pdev: pci device struct
8658  * Adapter hot plug remove entry point.
8663 static void __devexit ipr_remove(struct pci_dev *pdev)
8665 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Tear down the sysfs trace/dump attributes and the SCSI host before
 * the common removal path (__ipr_remove) releases the hardware. */
8669 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8671 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8673 scsi_remove_host(ioa_cfg->host);
8681  * ipr_probe - Adapter hot plug add entry point
8684  * 0 on success / non-zero on failure
8686 static int __devinit ipr_probe(struct pci_dev *pdev,
8687 const struct pci_device_id *dev_id)
8689 struct ipr_ioa_cfg *ioa_cfg;
/* Stage 1: allocate and initialize the adapter (ipr_probe_ioa). */
8692 rc = ipr_probe_ioa(pdev, dev_id);
8697 ioa_cfg = pci_get_drvdata(pdev);
/* Stage 2: bring the IOA operational. */
8698 rc = ipr_probe_ioa_part2(ioa_cfg);
8705 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
/* Create the sysfs trace attribute; on failure unwind the SCSI host. */
8712 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8716 scsi_remove_host(ioa_cfg->host);
/* Create the sysfs dump attribute; on failure also remove the trace file. */
8721 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8725 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8727 scsi_remove_host(ioa_cfg->host);
/* Discover devices: normal SCSI scan, the sparse VSET bus, and the
 * IOA's own resource address, then kick the worker thread. */
8732 scsi_scan_host(ioa_cfg->host);
8733 ipr_scan_vsets(ioa_cfg);
8734 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8735 ioa_cfg->allow_ml_add_del = 1;
8736 ioa_cfg->host->max_channel = IPR_VSET_BUS;
8737 schedule_work(&ioa_cfg->work_q);
8742  * ipr_shutdown - Shutdown handler.
8743  * @pdev: pci device struct
8745  * This function is invoked upon system shutdown/reboot. It will issue
8746  * an adapter shutdown to the adapter to flush the write cache.
8751 static void ipr_shutdown(struct pci_dev *pdev)
8753 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8754 unsigned long lock_flags = 0;
/* Let any in-flight reset/reload complete first, dropping the lock
 * while sleeping on reset_wait_q. */
8756 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8757 while(ioa_cfg->in_reset_reload) {
8758 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8759 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8760 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Normal shutdown flushes the write cache; block until it finishes. */
8763 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8764 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8765 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* PCI IDs this driver binds to. The final field (driver_data) carries
 * per-device quirk flags: IPR_USE_LONG_TRANSOP_TIMEOUT extends the
 * transition-to-operational timeout and IPR_USE_PCI_WARM_RESET selects
 * a slot reset instead of BIST (consumed in ipr_probe_ioa()). */
8768 static struct pci_device_id ipr_pci_table[] __devinitdata = {
8769 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8770 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
8771 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8772 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
8773 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8774 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
8775 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8776 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
8777 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8778 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
8779 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8780 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
8781 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8782 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
8783 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8784 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8785 IPR_USE_LONG_TRANSOP_TIMEOUT },
8786 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8787 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8788 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8789 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8790 IPR_USE_LONG_TRANSOP_TIMEOUT },
8791 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8792 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8793 IPR_USE_LONG_TRANSOP_TIMEOUT },
8794 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8795 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8796 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8797 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8798 IPR_USE_LONG_TRANSOP_TIMEOUT},
8799 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8800 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8801 IPR_USE_LONG_TRANSOP_TIMEOUT },
8802 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8803 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8804 IPR_USE_LONG_TRANSOP_TIMEOUT },
8805 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8806 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
8807 IPR_USE_LONG_TRANSOP_TIMEOUT },
8808 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8809 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
8810 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8811 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
8812 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
8813 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
8814 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
8815 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8816 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
8817 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8818 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8819 IPR_USE_LONG_TRANSOP_TIMEOUT },
8820 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8821 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8822 IPR_USE_LONG_TRANSOP_TIMEOUT },
8823 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
8824 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
8825 IPR_USE_LONG_TRANSOP_TIMEOUT },
/* Export the ID table so userspace tooling can autoload the module. */
8828 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI error recovery (EEH/AER) callbacks for this driver. */
8830 static struct pci_error_handlers ipr_err_handler = {
8831 .error_detected = ipr_pci_error_detected,
8832 .slot_reset = ipr_pci_slot_reset,
/* PCI driver registration: binds ipr_pci_table entries to the probe,
 * remove, shutdown, and error-recovery entry points above. */
8835 static struct pci_driver ipr_driver = {
8837 .id_table = ipr_pci_table,
8839 .remove = __devexit_p(ipr_remove),
8840 .shutdown = ipr_shutdown,
8841 .err_handler = &ipr_err_handler,
8845  * ipr_init - Module entry point
8848  * 0 on success / negative value on failure
8850 static int __init ipr_init(void)
/* Announce the driver version, then register with the PCI core;
 * per-adapter setup happens in ipr_probe() as devices are bound. */
8852 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8853 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8855 return pci_register_driver(&ipr_driver);
8859  * ipr_exit - Module unload
8861  * Module unload entry point.
8866 static void __exit ipr_exit(void)
/* Unregistering triggers ipr_remove() for every bound adapter. */
8868 pci_unregister_driver(&ipr_driver);
/* Wire the module load/unload entry points into the kernel. */
8871 module_init(ipr_init);
8872 module_exit(ipr_exit);