2 * Disk Array driver for HP Smart Array controllers.
3 * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <linux/blktrace_api.h>
42 #include <asm/uaccess.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/blkdev.h>
47 #include <linux/genhd.h>
48 #include <linux/completion.h>
49 #include <scsi/scsi.h>
51 #include <scsi/scsi_ioctl.h>
52 #include <linux/cdrom.h>
53 #include <linux/scatterlist.h>
54 #include <linux/kthread.h>
/*
 * Pack a (major, minor, subminor) driver version triple into a single
 * integer: major in bits 16 and up, minor in bits 8-15, subminor in
 * bits 0-7.  Arguments are parenthesized so that expression arguments
 * (e.g. "1 + 2") expand correctly.
 */
#define CCISS_DRIVER_VERSION(maj, min, submin) \
	(((maj) << 16) | ((min) << 8) | (submin))
#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
/* Module metadata reported by modinfo(8). */
60 /* Embedded module documentation macros - see modules.h */
61 MODULE_AUTHOR("Hewlett-Packard Company");
62 MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
63 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
64 " SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
65 " Smart Array G2 Series SAS/SATA Controllers");
66 MODULE_VERSION("3.6.20");
67 MODULE_LICENSE("GPL");
/* Command-block layout and the cciss ioctl ABI shared with userspace. */
69 #include "cciss_cmd.h"
71 #include <linux/cciss_ioctl.h>
/*
 * PCI IDs this driver claims.  Each row is
 * { vendor, device, subsystem-vendor, subsystem-device }; the final row
 * wildcards the IDs and instead matches any HP board whose PCI class is
 * STORAGE_RAID (catch-all for newer controllers).
 * NOTE(review): the terminating {0,} sentinel and closing "};" are not
 * visible in this listing -- presumably elided; confirm against the
 * original file.
 */
73 /* define the PCI info for the cards we can control */
74 static const struct pci_device_id cciss_pci_device_id[] = {
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
76 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
77 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
78 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
79 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
80 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
81 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
82 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
83 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
102 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
103 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
/* Export the table so hotplug/modprobe can auto-bind the module. */
107 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
/*
 * Table mapping a board_id (subsystem device ID in the high 16 bits,
 * subsystem vendor ID in the low 16 bits) to a marketing name and the
 * register-access method table (SA5_access or SA5B_access) for that
 * family.  The 0xFFFF103C row is the fallback used for boards matched
 * only by the PCI-class wildcard above.
 * NOTE(review): the closing "};" of this array is not visible in this
 * listing -- presumably elided.
 */
109 /* board_id = Subsystem Device ID & Vendor ID
110 * product = Marketing Name for the board
111 * access = Address of the struct of function pointers
113 static struct board_type products[] = {
114 {0x40700E11, "Smart Array 5300", &SA5_access},
115 {0x40800E11, "Smart Array 5i", &SA5B_access},
116 {0x40820E11, "Smart Array 532", &SA5B_access},
117 {0x40830E11, "Smart Array 5312", &SA5B_access},
118 {0x409A0E11, "Smart Array 641", &SA5_access},
119 {0x409B0E11, "Smart Array 642", &SA5_access},
120 {0x409C0E11, "Smart Array 6400", &SA5_access},
121 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
122 {0x40910E11, "Smart Array 6i", &SA5_access},
123 {0x3225103C, "Smart Array P600", &SA5_access},
124 {0x3223103C, "Smart Array P800", &SA5_access},
125 {0x3234103C, "Smart Array P400", &SA5_access},
126 {0x3235103C, "Smart Array P400i", &SA5_access},
127 {0x3211103C, "Smart Array E200i", &SA5_access},
128 {0x3212103C, "Smart Array E200", &SA5_access},
129 {0x3213103C, "Smart Array E200i", &SA5_access},
130 {0x3214103C, "Smart Array E200i", &SA5_access},
131 {0x3215103C, "Smart Array E200i", &SA5_access},
132 {0x3237103C, "Smart Array E500", &SA5_access},
133 {0x323D103C, "Smart Array P700m", &SA5_access},
134 {0x3241103C, "Smart Array P212", &SA5_access},
135 {0x3243103C, "Smart Array P410", &SA5_access},
136 {0x3245103C, "Smart Array P410i", &SA5_access},
137 {0x3247103C, "Smart Array P411", &SA5_access},
138 {0x3249103C, "Smart Array P812", &SA5_access},
139 {0x324A103C, "Smart Array P712m", &SA5_access},
140 {0x324B103C, "Smart Array P711m", &SA5_access},
141 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
/* Timeouts (milliseconds) for waiting on controller mode transitions. */
144 /* How long to wait (in milliseconds) for board to go into simple mode */
145 #define MAX_CONFIG_WAIT 30000
146 #define MAX_IOCTL_CONFIG_WAIT 1000
148 /*define how many times we will try a command because of bus resets */
149 #define MAX_CMD_RETRIES 3
153 /* Originally cciss driver only supports 8 major numbers */
154 #define MAX_CTLR_ORIG 8
/* One ctlr_info_t per controller, indexed by controller number. */
156 static ctlr_info_t *hba[MAX_CTLR];
/*
 * Forward declarations for the block-layer entry points and internal
 * helpers defined later in the file.
 * NOTE(review): several prototypes below appear truncated by this
 * listing (e.g. deregister_disk, cciss_interrupt_mode,
 * sendcmd_withirq) -- their trailing parameters are not visible.
 */
158 static void do_cciss_request(struct request_queue *q);
159 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
160 static int cciss_open(struct block_device *bdev, fmode_t mode);
161 static int cciss_release(struct gendisk *disk, fmode_t mode);
162 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
163 unsigned int cmd, unsigned long arg);
164 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
166 static int cciss_revalidate(struct gendisk *disk);
167 static int rebuild_lun_table(ctlr_info_t *h, int first_time);
168 static int deregister_disk(ctlr_info_t *h, int drv_index,
171 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
172 sector_t *total_size, unsigned int *block_size);
173 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
174 sector_t *total_size, unsigned int *block_size);
175 static void cciss_geometry_inquiry(int ctlr, int logvol,
176 int withirq, sector_t total_size,
177 unsigned int block_size, InquiryData_struct *inq_buff,
178 drive_info_struct *drv);
179 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
181 static void start_io(ctlr_info_t *h);
182 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
183 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
184 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
185 __u8 page_code, unsigned char scsi3addr[],
188 static void fail_all_cmds(unsigned long ctlr);
189 static int scan_thread(void *data);
190 static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
/* With CONFIG_PROC_FS the real cciss_procinit is defined below;
 * line 195 is presumably the empty stub from the #else branch. */
192 #ifdef CONFIG_PROC_FS
193 static void cciss_procinit(int i);
195 static void cciss_procinit(int i)
198 #endif /* CONFIG_PROC_FS */
201 static int cciss_compat_ioctl(struct block_device *, fmode_t,
202 unsigned, unsigned long);
/*
 * Block-device entry points handed to the block layer.
 * NOTE(review): the .open initializer and the closing "};" are not
 * visible in this listing.
 */
205 static struct block_device_operations cciss_fops = {
206 .owner = THIS_MODULE,
208 .release = cciss_release,
209 .locked_ioctl = cciss_ioctl,
210 .getgeo = cciss_getgeo,
212 .compat_ioctl = cciss_compat_ioctl,
214 .revalidate_disk = cciss_revalidate,
218 * Enqueuing and dequeuing functions for cmdlists.
/* Despite the name, addQ pushes at the head of the hlist. */
220 static inline void addQ(struct hlist_head *list, CommandList_struct *c)
222 hlist_add_head(&c->list, list);
/* Unlink a command from whatever queue it is on; WARN (and presumably
 * bail out -- the early return is not visible in this listing) if the
 * node is already unhashed. */
225 static inline void removeQ(CommandList_struct *c)
227 if (WARN_ON(hlist_unhashed(&c->list)))
230 hlist_del_init(&c->list);
233 #include "cciss_scsi.c" /* For SCSI tape support */
235 #define RAID_UNKNOWN 6
237 #ifdef CONFIG_PROC_FS
240 * Report information about this controller.
/* ENG_GIG_FACTOR converts 512-byte sectors to "engineering" GB. */
242 #define ENG_GIG 1000000000
243 #define ENG_GIG_FACTOR (ENG_GIG/512)
244 #define ENGAGE_SCSI "engage scsi"
245 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
249 static struct proc_dir_entry *proc_cciss;
/*
 * Emit the /proc/driver/cciss/cciss<N> header: controller identity,
 * firmware version, and queue-depth statistics, followed by the SCSI
 * tape report when CONFIG_CISS_SCSI_TAPE is enabled.
 */
251 static void cciss_seq_show_header(struct seq_file *seq)
253 ctlr_info_t *h = seq->private;
255 seq_printf(seq, "%s: HP %s Controller\n"
256 "Board ID: 0x%08lx\n"
257 "Firmware Version: %c%c%c%c\n"
259 "Logical drives: %d\n"
260 "Current Q depth: %d\n"
261 "Current # commands on controller: %d\n"
262 "Max Q depth since init: %d\n"
263 "Max # commands on controller since init: %d\n"
264 "Max SG entries since init: %d\n",
267 (unsigned long)h->board_id,
268 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
269 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
271 h->Qdepth, h->commands_outstanding,
272 h->maxQsinceinit, h->max_outstanding, h->maxSG);
274 #ifdef CONFIG_CISS_SCSI_TAPE
275 cciss_seq_tape_report(seq, h->ctlr);
276 #endif /* CONFIG_CISS_SCSI_TAPE */
/*
 * seq_file .start: refuse with -EBUSY while a logical volume is being
 * (de)configured, otherwise claim busy_configuring under the
 * controller lock so the drive table cannot change mid-dump, then
 * print the header.  The returned iterator position is not visible in
 * this listing.
 */
279 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
281 ctlr_info_t *h = seq->private;
282 unsigned ctlr = h->ctlr;
285 /* prevent displaying bogus info during configuration
286 * or deconfiguration of a logical volume
288 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
289 if (h->busy_configuring) {
290 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
291 return ERR_PTR(-EBUSY);
/* Claim the flag; cciss_seq_stop() clears it. */
293 h->busy_configuring = 1;
294 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
297 cciss_seq_show_header(seq);
/*
 * seq_file .show: print one logical drive per iteration as
 * "cciss/c<ctlr>d<vol>  <size>GB  RAID <level>".  Size is converted
 * from 512-byte blocks to GB (integer + fraction) via sector_div.
 */
302 static int cciss_seq_show(struct seq_file *seq, void *v)
304 sector_t vol_sz, vol_sz_frac;
305 ctlr_info_t *h = seq->private;
306 unsigned ctlr = h->ctlr;
308 drive_info_struct *drv = &h->drv[*pos];
310 if (*pos > h->highest_lun)
316 vol_sz = drv->nr_blocks;
317 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
319 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
/* Clamp out-of-range levels so the raid_label[] lookup is safe. */
321 if (drv->raid_level > 5)
322 drv->raid_level = RAID_UNKNOWN;
323 seq_printf(seq, "cciss/c%dd%d:"
324 "\t%4u.%02uGB\tRAID %s\n",
325 ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
326 raid_label[drv->raid_level]);
/* seq_file .next: stop iterating past the highest configured LUN.
 * The position increment/return are not visible in this listing. */
330 static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
332 ctlr_info_t *h = seq->private;
334 if (*pos > h->highest_lun)
/*
 * seq_file .stop: release the busy_configuring flag claimed in
 * cciss_seq_start(), but only if start actually succeeded (a start
 * that failed returns ERR_PTR(-EBUSY) and never set the flag).
 */
341 static void cciss_seq_stop(struct seq_file *seq, void *v)
343 ctlr_info_t *h = seq->private;
345 /* Only reset h->busy_configuring if we succeeded in setting
346 * it during cciss_seq_start. */
347 if (v == ERR_PTR(-EBUSY))
350 h->busy_configuring = 0;
/* Iterator operations for the /proc controller report. */
353 static struct seq_operations cciss_seq_ops = {
354 .start = cciss_seq_start,
355 .show = cciss_seq_show,
356 .next = cciss_seq_next,
357 .stop = cciss_seq_stop,
/* proc open: standard seq_open, with the controller's ctlr_info_t
 * (stashed in the proc entry's data) as seq->private. */
360 static int cciss_seq_open(struct inode *inode, struct file *file)
362 int ret = seq_open(file, &cciss_seq_ops);
363 struct seq_file *seq = file->private_data;
366 seq->private = PDE(inode)->data;
/*
 * Write handler for the proc entry.  Copies the (page-bounded) user
 * buffer into a kernel page, NUL-terminates it, and -- when tape
 * support is compiled in -- recognizes the "engage scsi" command to
 * attach the controller's SCSI tape/medium-changer support.
 * The return-value paths are not visible in this listing.
 */
372 cciss_proc_write(struct file *file, const char __user *buf,
373 size_t length, loff_t *ppos)
378 #ifndef CONFIG_CISS_SCSI_TAPE
382 if (!buf || length > PAGE_SIZE - 1)
385 buffer = (char *)__get_free_page(GFP_KERNEL)
390 if (copy_from_user(buffer, buf, length))
392 buffer[length] = '\0';
394 #ifdef CONFIG_CISS_SCSI_TAPE
395 if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
396 struct seq_file *seq = file->private_data;
397 ctlr_info_t *h = seq->private;
400 rc = cciss_engage_scsi(h->ctlr);
406 #endif /* CONFIG_CISS_SCSI_TAPE */
408 /* might be nice to have "disengage" too, but it's not
409 safely possible. (only 1 module use count, lock issues.) */
412 free_page((unsigned long)buffer);
/* File operations for /proc/driver/cciss/cciss<N>. */
416 static struct file_operations cciss_proc_fops = {
417 .owner = THIS_MODULE,
418 .open = cciss_seq_open,
421 .release = seq_release,
422 .write = cciss_proc_write,
/*
 * Create /proc/driver/cciss (once) and the per-controller entry
 * /proc/driver/cciss/<devname>, with hba[i] as the entry's data.
 */
425 static void __devinit cciss_procinit(int i)
427 struct proc_dir_entry *pde;
429 if (proc_cciss == NULL)
430 proc_cciss = proc_mkdir("driver/cciss", NULL);
433 pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
435 &cciss_proc_fops, hba[i]);
437 #endif /* CONFIG_PROC_FS */
439 #define MAX_PRODUCT_NAME_LEN 19
/* Recover the ctlr_info / drive_info from an embedded struct device. */
441 #define to_hba(n) container_of(n, struct ctlr_info, dev)
442 #define to_drv(n) container_of(n, drive_info_struct, dev)
444 static struct device_type cciss_host_type = {
445 .name = "cciss_host",
/*
 * sysfs "unique_id": print the drive's 16-byte serial number as hex.
 * The serial is copied into a local under the controller lock so a
 * concurrent reconfiguration cannot tear it; the busy_configuring
 * error path is not visible in this listing.
 */
448 static ssize_t dev_show_unique_id(struct device *dev,
449 struct device_attribute *attr,
452 drive_info_struct *drv = to_drv(dev);
453 struct ctlr_info *h = to_hba(drv->dev.parent);
458 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
459 if (h->busy_configuring)
462 memcpy(sn, drv->serial_no, sizeof(sn));
463 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
468 return snprintf(buf, 16 * 2 + 2,
469 "%02X%02X%02X%02X%02X%02X%02X%02X"
470 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
471 sn[0], sn[1], sn[2], sn[3],
472 sn[4], sn[5], sn[6], sn[7],
473 sn[8], sn[9], sn[10], sn[11],
474 sn[12], sn[13], sn[14], sn[15]);
476 DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
/*
 * sysfs "vendor": snapshot drv->vendor under the controller lock.
 * NOTE(review): the snprintf prints drv->vendor directly rather than
 * the locked local copy "vendor" -- same pattern in "model" and "rev"
 * below; looks like a latent race worth confirming upstream.
 */
478 static ssize_t dev_show_vendor(struct device *dev,
479 struct device_attribute *attr,
482 drive_info_struct *drv = to_drv(dev);
483 struct ctlr_info *h = to_hba(drv->dev.parent);
484 char vendor[VENDOR_LEN + 1];
488 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
489 if (h->busy_configuring)
492 memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
493 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
498 return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor);
500 DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
/* sysfs "model": same locking pattern as "vendor" above. */
502 static ssize_t dev_show_model(struct device *dev,
503 struct device_attribute *attr,
506 drive_info_struct *drv = to_drv(dev);
507 struct ctlr_info *h = to_hba(drv->dev.parent);
508 char model[MODEL_LEN + 1];
512 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
513 if (h->busy_configuring)
516 memcpy(model, drv->model, MODEL_LEN + 1);
517 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
522 return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model);
524 DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
/* sysfs "rev": firmware revision string, same pattern as "vendor". */
526 static ssize_t dev_show_rev(struct device *dev,
527 struct device_attribute *attr,
530 drive_info_struct *drv = to_drv(dev);
531 struct ctlr_info *h = to_hba(drv->dev.parent);
532 char rev[REV_LEN + 1];
536 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
537 if (h->busy_configuring)
540 memcpy(rev, drv->rev, REV_LEN + 1);
541 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
546 return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev);
548 DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
/* Per-logical-drive attributes exposed under the c#d# sysfs node.
 * NOTE(review): dev_attr_rev and the NULL terminator are not visible
 * in this listing. */
550 static struct attribute *cciss_dev_attrs[] = {
551 &dev_attr_unique_id.attr,
552 &dev_attr_model.attr,
553 &dev_attr_vendor.attr,
558 static struct attribute_group cciss_dev_attr_group = {
559 .attrs = cciss_dev_attrs,
562 static struct attribute_group *cciss_dev_attr_groups[] = {
563 &cciss_dev_attr_group,
567 static struct device_type cciss_dev_type = {
568 .name = "cciss_device",
569 .groups = cciss_dev_attr_groups,
/* Pseudo bus all cciss devices hang off (initializer elided here). */
572 static struct bus_type cciss_bus_type = {
578 * Initialize sysfs entry for each controller. This sets up and registers
579 * the 'cciss#' directory for each individual controller under
580 * /sys/bus/pci/devices/<dev>/.
/* Register h->dev (named after h->devname) under the PCI device. */
582 static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
584 device_initialize(&h->dev);
585 h->dev.type = &cciss_host_type;
586 h->dev.bus = &cciss_bus_type;
587 dev_set_name(&h->dev, "%s", h->devname);
588 h->dev.parent = &h->pdev->dev;
590 return device_add(&h->dev);
594 * Remove sysfs entries for an hba.
/* NOTE(review): body (presumably device_del(&h->dev)) is not visible
 * in this listing. */
596 static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
602 * Initialize sysfs for each logical drive. This sets up and registers
603 * the 'c#d#' directory for each individual logical drive under
604 * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from
605 * /sys/block/cciss!c#d# to this entry.
/* Register drv->dev as child "c<ctlr>d<index>" of the hba device. */
607 static int cciss_create_ld_sysfs_entry(struct ctlr_info *h,
608 drive_info_struct *drv,
611 device_initialize(&drv->dev);
612 drv->dev.type = &cciss_dev_type;
613 drv->dev.bus = &cciss_bus_type;
614 dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index);
615 drv->dev.parent = &h->dev;
616 return device_add(&drv->dev);
620 * Remove sysfs entries for a logical drive.
622 static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv)
624 device_del(&drv->dev);
628 * For operations that cannot sleep, a command block is allocated at init,
629 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
630 * which ones are free or in use. For operations that can wait for kmalloc
631 * to possible sleep, this routine can be called with get_from_pool set to 0.
632 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
/*
 * Allocate a CommandList_struct plus its ErrorInfo_struct, either
 * fresh via pci_alloc_consistent (get_from_pool == 0) or from the
 * controller's preallocated pool via a find/test_and_set_bit loop on
 * h->cmd_pool_bits.  On success the command's DMA bus address and
 * error-descriptor fields are filled in.  Several error-return and
 * bounds-check lines are not visible in this listing.
 */
634 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
636 CommandList_struct *c;
639 dma_addr_t cmd_dma_handle, err_dma_handle;
641 if (!get_from_pool) {
642 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
643 sizeof(CommandList_struct), &cmd_dma_handle);
646 memset(c, 0, sizeof(CommandList_struct));
650 c->err_info = (ErrorInfo_struct *)
651 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
/* If the error buffer cannot be allocated, roll back the command. */
654 if (c->err_info == NULL) {
655 pci_free_consistent(h->pdev,
656 sizeof(CommandList_struct), c, cmd_dma_handle);
659 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
660 } else { /* get it out of the controllers pool */
/* Atomically claim a free slot; retry if another CPU raced us. */
663 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
666 } while (test_and_set_bit
667 (i & (BITS_PER_LONG - 1),
668 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
670 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
673 memset(c, 0, sizeof(CommandList_struct));
/* Pool entries are laid out contiguously; derive DMA addresses
 * from the pool base handles by index. */
674 cmd_dma_handle = h->cmd_pool_dhandle
675 + i * sizeof(CommandList_struct);
676 c->err_info = h->errinfo_pool + i;
677 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
678 err_dma_handle = h->errinfo_pool_dhandle
679 + i * sizeof(ErrorInfo_struct);
/* Common tail: record bus addresses for controller consumption. */
685 INIT_HLIST_NODE(&c->list);
686 c->busaddr = (__u32) cmd_dma_handle;
687 temp64.val = (__u64) err_dma_handle;
688 c->ErrDesc.Addr.lower = temp64.val32.lower;
689 c->ErrDesc.Addr.upper = temp64.val32.upper;
690 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
697 * Frees a command block that was previously allocated with cmd_alloc().
/*
 * got_from_pool must match the get_from_pool used at allocation:
 * non-pool commands are returned via pci_free_consistent (error
 * buffer first, then the command itself); pool commands just clear
 * their bit in h->cmd_pool_bits.  The index computation for 'i' is
 * not visible in this listing.
 */
699 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
704 if (!got_from_pool) {
705 temp64.val32.lower = c->ErrDesc.Addr.lower;
706 temp64.val32.upper = c->ErrDesc.Addr.upper;
707 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
708 c->err_info, (dma_addr_t) temp64.val);
709 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
710 c, (dma_addr_t) c->busaddr);
713 clear_bit(i & (BITS_PER_LONG - 1),
714 h->cmd_pool_bits + (i / BITS_PER_LONG));
/* The controller is stashed in the request queue's queuedata. */
719 static inline ctlr_info_t *get_host(struct gendisk *disk)
721 return disk->queue->queuedata;
/* The drive info is stashed in the gendisk's private_data. */
724 static inline drive_info_struct *get_drv(struct gendisk *disk)
726 return disk->private_data;
730 * Open. Make sure the device is really there.
/*
 * Block-device open.  Rejects opens while the controller is
 * initializing or the drive is being reconfigured; unconfigured
 * drives (heads == 0) are only openable by root (CAP_SYS_ADMIN) under
 * the raw-volume rules described in the comment below.  Error return
 * values are not visible in this listing.
 */
732 static int cciss_open(struct block_device *bdev, fmode_t mode)
734 ctlr_info_t *host = get_host(bdev->bd_disk);
735 drive_info_struct *drv = get_drv(bdev->bd_disk);
738 printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
739 #endif /* CCISS_DEBUG */
741 if (host->busy_initializing || drv->busy_configuring)
744 * Root is allowed to open raw volume zero even if it's not configured
745 * so array config can still work. Root is also allowed to open any
746 * volume that has a LUN ID, so it can issue IOCTL to reread the
747 * disk information. I don't think I really like this
748 * but I'm already using way to many device nodes to claim another one
749 * for "raw controller".
751 if (drv->heads == 0) {
752 if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */
753 /* if not node 0 make sure it is a partition = 0 */
754 if (MINOR(bdev->bd_dev) & 0x0f) {
756 /* if it is, make sure we have a LUN ID */
757 } else if (drv->LunID == 0) {
761 if (!capable(CAP_SYS_ADMIN))
/* Block-device release; usage-count bookkeeping is not visible in
 * this listing. */
772 static int cciss_release(struct gendisk *disk, fmode_t mode)
774 ctlr_info_t *host = get_host(disk);
775 drive_info_struct *drv = get_drv(disk);
778 printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
779 #endif /* CCISS_DEBUG */
/* Thin wrapper around cciss_ioctl (presumably takes/releases the BKL
 * in the elided lines -- confirm against the original file). */
788 static int do_ioctl(struct block_device *bdev, fmode_t mode,
789 unsigned cmd, unsigned long arg)
793 ret = cciss_ioctl(bdev, mode, cmd, arg);
798 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
799 unsigned cmd, unsigned long arg);
800 static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
801 unsigned cmd, unsigned long arg);
/*
 * 32-bit compat ioctl entry: layout-identical commands pass straight
 * through to do_ioctl; the two passthru commands need argument
 * translation and get dedicated handlers.  The default case is not
 * visible in this listing.
 */
803 static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
804 unsigned cmd, unsigned long arg)
807 case CCISS_GETPCIINFO:
808 case CCISS_GETINTINFO:
809 case CCISS_SETINTINFO:
810 case CCISS_GETNODENAME:
811 case CCISS_SETNODENAME:
812 case CCISS_GETHEARTBEAT:
813 case CCISS_GETBUSTYPES:
814 case CCISS_GETFIRMVER:
815 case CCISS_GETDRIVVER:
816 case CCISS_REVALIDVOLS:
817 case CCISS_DEREGDISK:
818 case CCISS_REGNEWDISK:
820 case CCISS_RESCANDISK:
821 case CCISS_GETLUNINFO:
822 return do_ioctl(bdev, mode, cmd, arg);
824 case CCISS_PASSTHRU32:
825 return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
826 case CCISS_BIG_PASSTHRU32:
827 return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);
/*
 * Translate a 32-bit IOCTL32_Command_struct into a native
 * IOCTL_Command_struct staged in compat user space, forward it as
 * CCISS_PASSTHRU, then copy the error info back to the 32-bit struct.
 * NOTE(review): the "err |=" accumulation on the copy_from_user /
 * copy_in_user calls appears to have been elided by this listing --
 * their results look unchecked as shown; confirm against the original.
 */
834 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
835 unsigned cmd, unsigned long arg)
837 IOCTL32_Command_struct __user *arg32 =
838 (IOCTL32_Command_struct __user *) arg;
839 IOCTL_Command_struct arg64;
840 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
846 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
847 sizeof(arg64.LUN_info));
849 copy_from_user(&arg64.Request, &arg32->Request,
850 sizeof(arg64.Request));
852 copy_from_user(&arg64.error_info, &arg32->error_info,
853 sizeof(arg64.error_info));
854 err |= get_user(arg64.buf_size, &arg32->buf_size);
855 err |= get_user(cp, &arg32->buf);
/* Widen the 32-bit buffer pointer to a native user pointer. */
856 arg64.buf = compat_ptr(cp);
857 err |= copy_to_user(p, &arg64, sizeof(arg64));
862 err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
866 copy_in_user(&arg32->error_info, &p->error_info,
867 sizeof(arg32->error_info));
/*
 * Same translation as cciss_ioctl32_passthru but for the BIG variant,
 * which additionally carries malloc_size.  Forwarded as
 * CCISS_BIG_PASSTHRU.  The same NOTE(review) about elided "err |="
 * prefixes on copy_from_user/copy_in_user applies here.
 */
873 static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
874 unsigned cmd, unsigned long arg)
876 BIG_IOCTL32_Command_struct __user *arg32 =
877 (BIG_IOCTL32_Command_struct __user *) arg;
878 BIG_IOCTL_Command_struct arg64;
879 BIG_IOCTL_Command_struct __user *p =
880 compat_alloc_user_space(sizeof(arg64));
886 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
887 sizeof(arg64.LUN_info));
889 copy_from_user(&arg64.Request, &arg32->Request,
890 sizeof(arg64.Request));
892 copy_from_user(&arg64.error_info, &arg32->error_info,
893 sizeof(arg64.error_info));
894 err |= get_user(arg64.buf_size, &arg32->buf_size);
895 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
896 err |= get_user(cp, &arg32->buf);
897 arg64.buf = compat_ptr(cp);
898 err |= copy_to_user(p, &arg64, sizeof(arg64));
903 err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
907 copy_in_user(&arg32->error_info, &p->error_info,
908 sizeof(arg32->error_info));
/* HDIO_GETGEO: report the fake geometry stored in the drive info. */
915 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
917 drive_info_struct *drv = get_drv(bdev->bd_disk);
922 geo->heads = drv->heads;
923 geo->sectors = drv->sectors;
924 geo->cylinders = drv->cylinders;
/*
 * After an ioctl-issued command completes, let the unit-attention
 * checker inspect it when the target status warrants a look.
 * NOTE(review): the "!= SAM_STAT_CHECK_CONDITION" reads oddly (unit
 * attentions arrive with CHECK_CONDITION); verify the condition's
 * sense against the original file before relying on it.
 */
928 static void check_ioctl_unit_attention(ctlr_info_t *host, CommandList_struct *c)
930 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
931 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
932 (void)check_for_unit_attention(host, c);
937 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
938 unsigned int cmd, unsigned long arg)
940 struct gendisk *disk = bdev->bd_disk;
941 ctlr_info_t *host = get_host(disk);
942 drive_info_struct *drv = get_drv(disk);
943 int ctlr = host->ctlr;
944 void __user *argp = (void __user *)arg;
947 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
948 #endif /* CCISS_DEBUG */
951 case CCISS_GETPCIINFO:
953 cciss_pci_info_struct pciinfo;
957 pciinfo.domain = pci_domain_nr(host->pdev->bus);
958 pciinfo.bus = host->pdev->bus->number;
959 pciinfo.dev_fn = host->pdev->devfn;
960 pciinfo.board_id = host->board_id;
962 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
966 case CCISS_GETINTINFO:
968 cciss_coalint_struct intinfo;
972 readl(&host->cfgtable->HostWrite.CoalIntDelay);
974 readl(&host->cfgtable->HostWrite.CoalIntCount);
976 (argp, &intinfo, sizeof(cciss_coalint_struct)))
980 case CCISS_SETINTINFO:
982 cciss_coalint_struct intinfo;
988 if (!capable(CAP_SYS_ADMIN))
991 (&intinfo, argp, sizeof(cciss_coalint_struct)))
993 if ((intinfo.delay == 0) && (intinfo.count == 0))
995 // printk("cciss_ioctl: delay and count cannot be 0\n");
998 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
999 /* Update the field, and then ring the doorbell */
1000 writel(intinfo.delay,
1001 &(host->cfgtable->HostWrite.CoalIntDelay));
1002 writel(intinfo.count,
1003 &(host->cfgtable->HostWrite.CoalIntCount));
1004 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
1006 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
1007 if (!(readl(host->vaddr + SA5_DOORBELL)
1008 & CFGTBL_ChangeReq))
1010 /* delay and try again */
1013 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1014 if (i >= MAX_IOCTL_CONFIG_WAIT)
1018 case CCISS_GETNODENAME:
1020 NodeName_type NodeName;
1025 for (i = 0; i < 16; i++)
1027 readb(&host->cfgtable->ServerName[i]);
1028 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
1032 case CCISS_SETNODENAME:
1034 NodeName_type NodeName;
1035 unsigned long flags;
1040 if (!capable(CAP_SYS_ADMIN))
1044 (NodeName, argp, sizeof(NodeName_type)))
1047 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1049 /* Update the field, and then ring the doorbell */
1050 for (i = 0; i < 16; i++)
1052 &host->cfgtable->ServerName[i]);
1054 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
1056 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
1057 if (!(readl(host->vaddr + SA5_DOORBELL)
1058 & CFGTBL_ChangeReq))
1060 /* delay and try again */
1063 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1064 if (i >= MAX_IOCTL_CONFIG_WAIT)
1069 case CCISS_GETHEARTBEAT:
1071 Heartbeat_type heartbeat;
1075 heartbeat = readl(&host->cfgtable->HeartBeat);
1077 (argp, &heartbeat, sizeof(Heartbeat_type)))
1081 case CCISS_GETBUSTYPES:
1083 BusTypes_type BusTypes;
1087 BusTypes = readl(&host->cfgtable->BusTypes);
1089 (argp, &BusTypes, sizeof(BusTypes_type)))
1093 case CCISS_GETFIRMVER:
1095 FirmwareVer_type firmware;
1099 memcpy(firmware, host->firm_ver, 4);
1102 (argp, firmware, sizeof(FirmwareVer_type)))
1106 case CCISS_GETDRIVVER:
1108 DriverVer_type DriverVer = DRIVER_VERSION;
1114 (argp, &DriverVer, sizeof(DriverVer_type)))
1119 case CCISS_DEREGDISK:
1121 case CCISS_REVALIDVOLS:
1122 return rebuild_lun_table(host, 0);
1124 case CCISS_GETLUNINFO:{
1125 LogvolInfo_struct luninfo;
1127 luninfo.LunID = drv->LunID;
1128 luninfo.num_opens = drv->usage_count;
1129 luninfo.num_parts = 0;
1130 if (copy_to_user(argp, &luninfo,
1131 sizeof(LogvolInfo_struct)))
1135 case CCISS_PASSTHRU:
1137 IOCTL_Command_struct iocommand;
1138 CommandList_struct *c;
1141 unsigned long flags;
1142 DECLARE_COMPLETION_ONSTACK(wait);
1147 if (!capable(CAP_SYS_RAWIO))
1151 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
1153 if ((iocommand.buf_size < 1) &&
1154 (iocommand.Request.Type.Direction != XFER_NONE)) {
1157 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
1158 /* Check kmalloc limits */
1159 if (iocommand.buf_size > 128000)
1162 if (iocommand.buf_size > 0) {
1163 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
1167 if (iocommand.Request.Type.Direction == XFER_WRITE) {
1168 /* Copy the data into the buffer we created */
1170 (buff, iocommand.buf, iocommand.buf_size)) {
1175 memset(buff, 0, iocommand.buf_size);
1177 if ((c = cmd_alloc(host, 0)) == NULL) {
1181 // Fill in the command type
1182 c->cmd_type = CMD_IOCTL_PEND;
1183 // Fill in Command Header
1184 c->Header.ReplyQueue = 0; // unused in simple mode
1185 if (iocommand.buf_size > 0) // buffer to fill
1187 c->Header.SGList = 1;
1188 c->Header.SGTotal = 1;
1189 } else // no buffers to fill
1191 c->Header.SGList = 0;
1192 c->Header.SGTotal = 0;
1194 c->Header.LUN = iocommand.LUN_info;
1195 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
1197 // Fill in Request block
1198 c->Request = iocommand.Request;
1200 // Fill in the scatter gather information
1201 if (iocommand.buf_size > 0) {
1202 temp64.val = pci_map_single(host->pdev, buff,
1204 PCI_DMA_BIDIRECTIONAL);
1205 c->SG[0].Addr.lower = temp64.val32.lower;
1206 c->SG[0].Addr.upper = temp64.val32.upper;
1207 c->SG[0].Len = iocommand.buf_size;
1208 c->SG[0].Ext = 0; // we are not chaining
1212 /* Put the request on the tail of the request queue */
1213 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1214 addQ(&host->reqQ, c);
1217 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1219 wait_for_completion(&wait);
1221 /* unlock the buffers from DMA */
1222 temp64.val32.lower = c->SG[0].Addr.lower;
1223 temp64.val32.upper = c->SG[0].Addr.upper;
1224 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
1226 PCI_DMA_BIDIRECTIONAL);
1228 check_ioctl_unit_attention(host, c);
1230 /* Copy the error information out */
1231 iocommand.error_info = *(c->err_info);
1233 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
1235 cmd_free(host, c, 0);
1239 if (iocommand.Request.Type.Direction == XFER_READ) {
1240 /* Copy the data out of the buffer we created */
1242 (iocommand.buf, buff, iocommand.buf_size)) {
1244 cmd_free(host, c, 0);
1249 cmd_free(host, c, 0);
1252 case CCISS_BIG_PASSTHRU:{
1253 BIG_IOCTL_Command_struct *ioc;
1254 CommandList_struct *c;
1255 unsigned char **buff = NULL;
1256 int *buff_size = NULL;
1258 unsigned long flags;
1262 DECLARE_COMPLETION_ONSTACK(wait);
1265 BYTE __user *data_ptr;
1269 if (!capable(CAP_SYS_RAWIO))
1271 ioc = (BIG_IOCTL_Command_struct *)
1272 kmalloc(sizeof(*ioc), GFP_KERNEL);
1277 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1281 if ((ioc->buf_size < 1) &&
1282 (ioc->Request.Type.Direction != XFER_NONE)) {
1286 /* Check kmalloc limits using all SGs */
1287 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1291 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1296 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1301 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1307 left = ioc->buf_size;
1308 data_ptr = ioc->buf;
1311 ioc->malloc_size) ? ioc->
1313 buff_size[sg_used] = sz;
1314 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1315 if (buff[sg_used] == NULL) {
1319 if (ioc->Request.Type.Direction == XFER_WRITE) {
1321 (buff[sg_used], data_ptr, sz)) {
1326 memset(buff[sg_used], 0, sz);
1332 if ((c = cmd_alloc(host, 0)) == NULL) {
1336 c->cmd_type = CMD_IOCTL_PEND;
1337 c->Header.ReplyQueue = 0;
1339 if (ioc->buf_size > 0) {
1340 c->Header.SGList = sg_used;
1341 c->Header.SGTotal = sg_used;
1343 c->Header.SGList = 0;
1344 c->Header.SGTotal = 0;
1346 c->Header.LUN = ioc->LUN_info;
1347 c->Header.Tag.lower = c->busaddr;
1349 c->Request = ioc->Request;
1350 if (ioc->buf_size > 0) {
1352 for (i = 0; i < sg_used; i++) {
1354 pci_map_single(host->pdev, buff[i],
1356 PCI_DMA_BIDIRECTIONAL);
1357 c->SG[i].Addr.lower =
1359 c->SG[i].Addr.upper =
1361 c->SG[i].Len = buff_size[i];
1362 c->SG[i].Ext = 0; /* we are not chaining */
1366 /* Put the request on the tail of the request queue */
1367 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1368 addQ(&host->reqQ, c);
1371 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1372 wait_for_completion(&wait);
1373 /* unlock the buffers from DMA */
1374 for (i = 0; i < sg_used; i++) {
1375 temp64.val32.lower = c->SG[i].Addr.lower;
1376 temp64.val32.upper = c->SG[i].Addr.upper;
1377 pci_unmap_single(host->pdev,
1378 (dma_addr_t) temp64.val, buff_size[i],
1379 PCI_DMA_BIDIRECTIONAL);
1381 check_ioctl_unit_attention(host, c);
1382 /* Copy the error information out */
1383 ioc->error_info = *(c->err_info);
1384 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1385 cmd_free(host, c, 0);
1389 if (ioc->Request.Type.Direction == XFER_READ) {
1390 /* Copy the data out of the buffer we created */
1391 BYTE __user *ptr = ioc->buf;
1392 for (i = 0; i < sg_used; i++) {
1394 (ptr, buff[i], buff_size[i])) {
1395 cmd_free(host, c, 0);
1399 ptr += buff_size[i];
1402 cmd_free(host, c, 0);
1406 for (i = 0; i < sg_used; i++)
1415 /* scsi_cmd_ioctl handles these, below, though some are not */
1416 /* very meaningful for cciss. SG_IO is the main one people want. */
1418 case SG_GET_VERSION_NUM:
1419 case SG_SET_TIMEOUT:
1420 case SG_GET_TIMEOUT:
1421 case SG_GET_RESERVED_SIZE:
1422 case SG_SET_RESERVED_SIZE:
1423 case SG_EMULATED_HOST:
1425 case SCSI_IOCTL_SEND_COMMAND:
1426 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1428 /* scsi_cmd_ioctl would normally handle these, below, but */
1429 /* they aren't a good fit for cciss, as CD-ROMs are */
1430 /* not supported, and we don't have any bus/target/lun */
1431 /* which we present to the kernel. */
1433 case CDROM_SEND_PACKET:
1434 case CDROMCLOSETRAY:
1436 case SCSI_IOCTL_GET_IDLUN:
1437 case SCSI_IOCTL_GET_BUS_NUMBER:
/*
 * cciss_check_queues() - restart per-logical-drive block queues once
 * command slots have been freed.  Called with h->lock held (see
 * cciss_softirq_done).
 * NOTE(review): this extract elides several original lines (braces,
 * 'int i' declaration, continue/break statements); comments below only
 * describe what the visible lines establish.
 */
1443 static void cciss_check_queues(ctlr_info_t *h)
/* Resume round-robin scanning where the previous pass stopped. */
1445 int start_queue = h->next_to_run;
1448 /* check to see if we have maxed out the number of commands that can
1449 * be placed on the queue. If so then exit. We do this check here
1450 * in case the interrupt we serviced was from an ioctl and did not
1451 * free any new commands.
/* No zero bit in cmd_pool_bits => every command slot is still in use. */
1453 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1456 /* We have room on the queue for more commands. Now we need to queue
1457 * them up. We will also keep track of the next queue to run so
1458 * that every queue gets a chance to be started first.
/* Walk all luns round-robin starting at start_queue, for fairness. */
1460 for (i = 0; i < h->highest_lun + 1; i++) {
1461 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1462 /* make sure the disk has been added and the drive is real
1463 * because this can be called from the middle of init_one.
1465 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1467 blk_start_queue(h->gendisk[curr_queue]->queue);
1469 /* check to see if we have maxed out the number of commands
1470 * that can be placed on the queue.
1472 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1473 if (curr_queue == start_queue) {
/* presumably advances next_to_run past the queue we just served
 * (assignment target elided in this extract) -- TODO confirm */
1475 (start_queue + 1) % (h->highest_lun + 1);
1478 h->next_to_run = curr_queue;
/*
 * cciss_softirq_done() - block-layer softirq completion handler.
 * Unmaps the command's scatter/gather DMA buffers, completes the
 * request, frees the command back to the pool, and kicks any stalled
 * queues.  (Some declarations, e.g. 'ddir'/'i'/'temp64', are elided in
 * this extract.)
 */
1485 static void cciss_softirq_done(struct request *rq)
1487 CommandList_struct *cmd = rq->completion_data;
1488 ctlr_info_t *h = hba[cmd->ctlr];
1489 unsigned long flags;
/* DMA direction must match the one used when mapping in the submit path. */
1493 if (cmd->Request.Type.Direction == XFER_READ)
1494 ddir = PCI_DMA_FROMDEVICE;
1496 ddir = PCI_DMA_TODEVICE;
1498 /* command did not need to be retried */
1499 /* unmap the DMA mapping for all the scatter gather elements */
1500 for (i = 0; i < cmd->Header.SGList; i++) {
/* Reassemble the 64-bit bus address from its two 32-bit halves. */
1501 temp64.val32.lower = cmd->SG[i].Addr.lower;
1502 temp64.val32.upper = cmd->SG[i].Addr.upper;
1503 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1507 printk("Done with %p\n", rq);
1508 #endif /* CCISS_DEBUG */
1510 /* set the residual count for pc requests */
1511 if (blk_pc_request(rq))
1512 rq->resid_len = cmd->err_info->ResidualCnt;
1514 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
/* Lock protects the command pool and the queue-restart scan below. */
1516 spin_lock_irqsave(&h->lock, flags);
1517 cmd_free(h, cmd, 1);
1518 cciss_check_queues(h);
1519 spin_unlock_irqrestore(&h->lock, flags);
/*
 * log_unit_to_scsi3addr() - encode a logical-volume index as the 8-byte
 * CISS LUN address expected by the firmware: bytes 0-3 hold the masked
 * LunID (0x40 or'd into byte 3 marks it as a logical volume), bytes 4-7
 * are zero.  (The 'int log_unit' parameter line is elided here.)
 * NOTE(review): mask 0x03fff spans only 14 bits; verify against the
 * firmware spec whether 0x3fff (no leading 0 nibble) was intended.
 */
1522 static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[],
1525 log_unit = h->drv[log_unit].LunID & 0x03fff;
1526 memset(&scsi3addr[4], 0, 4);
1527 memcpy(&scsi3addr[0], &log_unit, 4);
1528 scsi3addr[3] |= 0x40;
1531 /* This function gets the SCSI vendor, model, and revision of a logical drive
1532 * via the inquiry page 0. Model, vendor, and rev are set to empty strings if
1533 * they cannot be read.
/* (Extract note: allocation-failure handling, the withirq branch keyword
 * and kfree/return lines are elided.) */
1535 static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
1536 char *vendor, char *model, char *rev)
1539 InquiryData_struct *inq_buf;
1540 unsigned char scsi3addr[8];
1546 inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1550 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
/* withirq selects interrupt-driven vs. polled command submission. */
1552 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf,
1553 sizeof(InquiryData_struct), 0,
1554 scsi3addr, TYPE_CMD);
1556 rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf,
1557 sizeof(InquiryData_struct), 0,
1558 scsi3addr, TYPE_CMD);
/* Standard INQUIRY layout: vendor at byte 8, model at 16, revision at 32. */
1560 memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
1561 vendor[VENDOR_LEN] = '\0';
1562 memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
1563 model[MODEL_LEN] = '\0';
1564 memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
1565 rev[REV_LEN] = '\0';
1572 /* This function gets the serial number of a logical drive via
1573 * inquiry page 0x83. Serial no. is 16 bytes. If the serial
1574 * number cannot be had, for whatever reason, 16 bytes of 0xff
1575 * are returned instead.
1577 static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1578 unsigned char *serial_no, int buflen)
1580 #define PAGE_83_INQ_BYTES 64
1583 unsigned char scsi3addr[8];
/* Pre-fill with the 0xff "unavailable" pattern; only overwritten with
 * zeros (and then real data) once the inquiry buffer is allocated. */
1587 memset(serial_no, 0xff, buflen);
1588 buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
1591 memset(serial_no, 0, buflen);
1592 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
/* Page code 0x83: SCSI Device Identification VPD page. */
1594 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
1595 PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
1597 rc = sendcmd(CISS_INQUIRY, ctlr, buf,
1598 PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
/* Serial number payload starts at byte 8 of the VPD page. */
1600 memcpy(serial_no, &buf[8], buflen);
/*
 * cciss_add_disk() - initialize a gendisk and its request queue for
 * logical drive drv_index and register the queue limits.  (The
 * 'int drv_index' parameter line and error handling for a failed
 * blk_init_queue are elided in this extract.)
 */
1605 static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1608 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1609 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
1610 disk->major = h->major;
/* Each drive owns a contiguous range of minors (partitions). */
1611 disk->first_minor = drv_index << NWD_SHIFT;
1612 disk->fops = &cciss_fops;
1613 disk->private_data = &h->drv[drv_index];
1614 disk->driverfs_dev = &h->drv[drv_index].dev;
1616 /* Set up queue information */
1617 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
1619 /* This is a hardware imposed limit. */
1620 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1622 /* This is a limit in the driver and could be eliminated. */
1623 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1625 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1627 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1629 disk->queue->queuedata = h;
1631 blk_queue_logical_block_size(disk->queue,
1632 h->drv[drv_index].block_size);
1634 /* Make sure all queue data is written out before */
1635 /* setting h->drv[drv_index].queue, as setting this */
1636 /* allows the interrupt handler to start the queue */
/* (a memory barrier is presumably elided here -- TODO confirm) */
1638 h->drv[drv_index].queue = disk->queue;
1642 /* This function will check the usage_count of the drive to be updated/added.
1643 * If the usage_count is zero and it is a heretofore unknown drive, or,
1644 * the drive's capacity, geometry, or serial number has changed,
1645 * then the drive information will be updated and the disk will be
1646 * re-registered with the kernel. If these conditions don't hold,
1647 * then it will be left alone for the next reboot. The exception to this
1648 * is disk 0 which will always be left registered with the kernel since it
1649 * is also the controller node. Any changes to disk 0 will show up on
/* (Extract note: several control-flow lines -- braces, gotos, the
 * freeret/mem_msg labels, register_disk call -- are elided below;
 * comments describe only the visible statements.) */
1652 static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1654 ctlr_info_t *h = hba[ctlr];
1655 struct gendisk *disk;
1656 InquiryData_struct *inq_buff = NULL;
1657 unsigned int block_size;
1658 sector_t total_size;
1659 unsigned long flags = 0;
1661 drive_info_struct *drvinfo;
1662 int was_only_controller_node;
1664 /* Get information about the disk and modify the driver structure */
1665 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1666 drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL);
1667 if (inq_buff == NULL || drvinfo == NULL)
1670 /* See if we're trying to update the "controller node"
1671 * this will happen the when the first logical drive gets
1674 was_only_controller_node = (drv_index == 0 &&
1675 h->drv[0].raid_level == -1);
1677 /* testing to see if 16-byte CDBs are already being used */
1678 if (h->cciss_read == CCISS_READ_16) {
1679 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1680 &total_size, &block_size);
1683 cciss_read_capacity(ctlr, drv_index, 1,
1684 &total_size, &block_size);
1686 /* if read_capacity returns all F's this volume is >2TB */
1687 /* in size so we switch to 16-byte CDB's for all */
1688 /* read/write ops */
1689 if (total_size == 0xFFFFFFFFULL) {
1690 cciss_read_capacity_16(ctlr, drv_index, 1,
1691 &total_size, &block_size);
1692 h->cciss_read = CCISS_READ_16;
1693 h->cciss_write = CCISS_WRITE_16;
1695 h->cciss_read = CCISS_READ_10;
1696 h->cciss_write = CCISS_WRITE_10;
1700 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
/* READ CAPACITY returns the last LBA, hence the +1 for block count. */
1702 drvinfo->block_size = block_size;
1703 drvinfo->nr_blocks = total_size + 1;
1705 cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor,
1706 drvinfo->model, drvinfo->rev);
1707 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
1708 sizeof(drvinfo->serial_no));
1710 /* Is it the same disk we already know, and nothing's changed? */
1711 if (h->drv[drv_index].raid_level != -1 &&
1712 ((memcmp(drvinfo->serial_no,
1713 h->drv[drv_index].serial_no, 16) == 0) &&
1714 drvinfo->block_size == h->drv[drv_index].block_size &&
1715 drvinfo->nr_blocks == h->drv[drv_index].nr_blocks &&
1716 drvinfo->heads == h->drv[drv_index].heads &&
1717 drvinfo->sectors == h->drv[drv_index].sectors &&
1718 drvinfo->cylinders == h->drv[drv_index].cylinders))
1719 /* The disk is unchanged, nothing to update */
1722 /* If we get here it's not the same disk, or something's changed,
1723 * so we need to * deregister it, and re-register it, if it's not
1725 * If the disk already exists then deregister it before proceeding
1726 * (unless it's the first disk (for the controller node).
1728 if (h->drv[drv_index].raid_level != -1 && drv_index != 0) {
1729 printk(KERN_WARNING "disk %d has changed.\n", drv_index);
/* busy_configuring is flipped under the controller lock so the
 * interrupt path sees a consistent flag. */
1730 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1731 h->drv[drv_index].busy_configuring = 1;
1732 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1734 /* deregister_disk sets h->drv[drv_index].queue = NULL
1735 * which keeps the interrupt handler from starting
1738 ret = deregister_disk(h, drv_index, 0);
1739 h->drv[drv_index].busy_configuring = 0;
1742 /* If the disk is in use return */
1746 /* Save the new information from cciss_geometry_inquiry
1747 * and serial number inquiry.
1749 h->drv[drv_index].block_size = drvinfo->block_size;
1750 h->drv[drv_index].nr_blocks = drvinfo->nr_blocks;
1751 h->drv[drv_index].heads = drvinfo->heads;
1752 h->drv[drv_index].sectors = drvinfo->sectors;
1753 h->drv[drv_index].cylinders = drvinfo->cylinders;
1754 h->drv[drv_index].raid_level = drvinfo->raid_level;
1755 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
1756 memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1);
1757 memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1);
1758 memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1);
1761 disk = h->gendisk[drv_index];
1762 set_capacity(disk, h->drv[drv_index].nr_blocks);
1764 /* If it's not disk 0 (drv_index != 0)
1765 * or if it was disk 0, but there was previously
1766 * no actual corresponding configured logical drive
1767 * (raid_leve == -1) then we want to update the
1768 * logical drive's information.
1770 if (drv_index || first_time)
1771 cciss_add_disk(h, disk, drv_index);
/* Out-of-memory exit path (label elided in this extract). */
1778 printk(KERN_ERR "cciss: out of memory\n");
1782 /* This function will find the first index of the controllers drive array
1783 * that has a -1 for the raid_level and will return that index. This is
1784 * where new drives will be added. If the index to be returned is greater
1785 * than the highest_lun index for the controller then highest_lun is set
1786 * to this new index. If there are no available indexes then -1 is returned.
1787 * "controller_node" is used to know if this is a real logical drive, or just
1788 * the controller node, which determines if this counts towards highest_lun.
/* (Extract note: the 'return i;' inside the loop and the final
 * 'return -1;' are elided.) */
1790 static int cciss_find_free_drive_index(int ctlr, int controller_node)
1794 for (i = 0; i < CISS_MAX_LUN; i++) {
/* raid_level == -1 marks an unused slot in drv[]. */
1795 if (hba[ctlr]->drv[i].raid_level == -1) {
1796 if (i > hba[ctlr]->highest_lun)
1797 if (!controller_node)
1798 hba[ctlr]->highest_lun = i;
1805 /* cciss_add_gendisk finds a free hba[]->drv structure
1806 * and allocates a gendisk if needed, and sets the lunid
1807 * in the drvinfo structure. It returns the index into
1808 * the ->drv[] array, or -1 if none are free.
1809 * is_controller_node indicates whether highest_lun should
1810 * count this disk, or if it's only being added to provide
1811 * a means to talk to the controller in case no logical
1812 * drives have yet been configured.
1814 static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1818 drv_index = cciss_find_free_drive_index(h->ctlr, controller_node);
1819 if (drv_index == -1)
1821 /*Check if the gendisk needs to be allocated */
/* Slots may still hold a gendisk from an earlier deregistration;
 * only allocate when the slot is empty. */
1822 if (!h->gendisk[drv_index]) {
1823 h->gendisk[drv_index] =
1824 alloc_disk(1 << NWD_SHIFT);
1825 if (!h->gendisk[drv_index]) {
1826 printk(KERN_ERR "cciss%d: could not "
1827 "allocate a new disk %d\n",
1828 h->ctlr, drv_index);
1832 h->drv[drv_index].LunID = lunid;
/* Sysfs entry creation failure falls through to the cleanup path below. */
1833 if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index))
1836 /* Don't need to mark this busy because nobody */
1837 /* else knows about this disk yet to contend */
1838 /* for access to it. */
1839 h->drv[drv_index].busy_configuring = 0;
/* Error cleanup: release the gendisk so the slot can be reused
 * (label elided in this extract). */
1844 put_disk(h->gendisk[drv_index]);
1845 h->gendisk[drv_index] = NULL;
1849 /* This is for the special case of a controller which
1850 * has no logical drives. In this case, we still need
1851 * to register a disk so the controller can be accessed
1852 * by the Array Config Utility.
1854 static void cciss_add_controller_node(ctlr_info_t *h)
1856 struct gendisk *disk;
1859 if (h->gendisk[0] != NULL) /* already did this? Then bail. */
/* lunid 0 / controller_node=1: this slot does not count toward highest_lun. */
1862 drv_index = cciss_add_gendisk(h, 0, 1);
1863 if (drv_index == -1) {
1864 printk(KERN_WARNING "cciss%d: could not "
1865 "add disk 0.\n", h->ctlr);
/* Zeroed geometry and raid_level == -1 mark this as a pure
 * controller node, not a real logical drive. */
1868 h->drv[drv_index].block_size = 512;
1869 h->drv[drv_index].nr_blocks = 0;
1870 h->drv[drv_index].heads = 0;
1871 h->drv[drv_index].sectors = 0;
1872 h->drv[drv_index].cylinders = 0;
1873 h->drv[drv_index].raid_level = -1;
1874 memset(h->drv[drv_index].serial_no, 0, 16);
1875 disk = h->gendisk[drv_index];
1876 cciss_add_disk(h, disk, drv_index);
1879 /* This function will add and remove logical drives from the Logical
1880 * drive array of the controller and maintain persistency of ordering
1881 * so that mount points are preserved until the next reboot. This allows
1882 * for the removal of logical drives in the middle of the drive array
1883 * without a re-ordering of those drives.
1885 * h = The controller to perform the operations on
/* (Extract note: declarations of ctlr/i/j/lunid/drv_found/etc., several
 * braces, break/continue statements and goto labels are elided; comments
 * describe only the visible statements.) */
1887 static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1891 ReportLunData_struct *ld_buff = NULL;
1898 unsigned long flags;
1900 if (!capable(CAP_SYS_RAWIO))
1903 /* Set busy_configuring flag for this operation */
/* Only one rebuild may run at a time; bail if another is in flight. */
1904 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1905 if (h->busy_configuring) {
1906 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1909 h->busy_configuring = 1;
1910 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1912 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1913 if (ld_buff == NULL)
/* Ask the firmware for the current list of logical volumes. */
1916 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1917 sizeof(ReportLunData_struct),
1918 0, CTLR_LUNID, TYPE_CMD);
1920 if (return_code == IO_OK)
1921 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1922 else { /* reading number of logical volumes failed */
1923 printk(KERN_WARNING "cciss: report logical volume"
1924 " command failed\n");
1929 num_luns = listlength / 8; /* 8 bytes per entry */
1930 if (num_luns > CISS_MAX_LUN) {
1931 num_luns = CISS_MAX_LUN;
1932 printk(KERN_WARNING "cciss: more luns configured"
1933 " on controller than can be handled by"
1938 cciss_add_controller_node(h);
1940 /* Compare controller drive array to driver's drive array
1941 * to see if any drives are missing on the controller due
1942 * to action of Array Config Utility (user deletes drive)
1943 * and deregister logical drives which have disappeared.
1945 for (i = 0; i <= h->highest_lun; i++) {
1949 /* skip holes in the array from already deleted drives */
1950 if (h->drv[i].raid_level == -1)
1953 for (j = 0; j < num_luns; j++) {
/* Firmware reports LUN ids little-endian; normalize for comparison. */
1954 memcpy(&lunid, &ld_buff->LUN[j][0], 4);
1955 lunid = le32_to_cpu(lunid);
1956 if (h->drv[i].LunID == lunid) {
1962 /* Deregister it from the OS, it's gone. */
1963 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1964 h->drv[i].busy_configuring = 1;
1965 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1966 return_code = deregister_disk(h, i, 1);
1967 cciss_destroy_ld_sysfs_entry(&h->drv[i]);
1968 h->drv[i].busy_configuring = 0;
1972 /* Compare controller drive array to driver's drive array.
1973 * Check for updates in the drive information and any new drives
1974 * on the controller due to ACU adding logical drives, or changing
1975 * a logical drive's size, etc. Reregister any new/changed drives
1977 for (i = 0; i < num_luns; i++) {
1982 memcpy(&lunid, &ld_buff->LUN[i][0], 4);
1983 lunid = le32_to_cpu(lunid);
1985 /* Find if the LUN is already in the drive array
1986 * of the driver. If so then update its info
1987 * if not in use. If it does not exist then find
1988 * the first free index and add it.
1990 for (j = 0; j <= h->highest_lun; j++) {
1991 if (h->drv[j].raid_level != -1 &&
1992 h->drv[j].LunID == lunid) {
1999 /* check if the drive was found already in the array */
2001 drv_index = cciss_add_gendisk(h, lunid, 0);
2002 if (drv_index == -1)
2005 cciss_update_drive_info(ctlr, drv_index, first_time);
2010 h->busy_configuring = 0;
2011 /* We return -1 here to tell the ACU that we have registered/updated
2012 * all of the drives that we can and to keep it from calling us
/* Out-of-memory exit path (label elided in this extract). */
2017 printk(KERN_ERR "cciss: out of memory\n");
2018 h->busy_configuring = 0;
2022 /* This function will deregister the disk and it's queue from the
2023 * kernel. It must be called with the controller lock held and the
2024 * drv structures busy_configuring flag set. It's parameters are:
2026 * disk = This is the disk to be deregistered
2027 * drv = This is the drive_info_struct associated with the disk to be
2028 * deregistered. It contains information about the disk used
2030 * clear_all = This flag determines whether or not the disk information
2031 * is going to be completely cleared out and the highest_lun
2032 * reset. Sometimes we want to clear out information about
2033 * the disk in preparation for re-adding it. In this case
2034 * the highest_lun should be left unchanged and the LunID
2035 * should not be cleared.
/* (Extract note: the 'int clear_all' parameter line, -EBUSY/-EPERM
 * returns, del_gendisk call and several assignments are elided.) */
2037 static int deregister_disk(ctlr_info_t *h, int drv_index,
2041 struct gendisk *disk;
2042 drive_info_struct *drv;
2044 if (!capable(CAP_SYS_RAWIO))
2047 drv = &h->drv[drv_index];
2048 disk = h->gendisk[drv_index];
2050 /* make sure logical volume is NOT is use */
/* usage_count 1 is tolerated for disk 0 / clear_all because the caller
 * itself may hold the reference; any other opener means busy. */
2051 if (clear_all || (h->gendisk[0] == disk)) {
2052 if (drv->usage_count > 1)
2054 } else if (drv->usage_count > 0)
2057 /* invalidate the devices and deregister the disk. If it is disk
2058 * zero do not deregister it but just zero out it's values. This
2059 * allows us to delete disk zero but keep the controller registered.
2061 if (h->gendisk[0] != disk) {
2062 struct request_queue *q = disk->queue;
2063 if (disk->flags & GENHD_FL_UP)
2066 blk_cleanup_queue(q);
2067 /* Set drv->queue to NULL so that we do not try
2068 * to call blk_start_queue on this queue in the
2073 /* If clear_all is set then we are deleting the logical
2074 * drive, not just refreshing its info. For drives
2075 * other than disk 0 we will call put_disk. We do not
2076 * do this for disk 0 as we need it to be able to
2077 * configure the controller.
2080 /* This isn't pretty, but we need to find the
2081 * disk in our array and NULL our the pointer.
2082 * This is so that we will call alloc_disk if
2083 * this index is used again later.
2085 for (i=0; i < CISS_MAX_LUN; i++){
2086 if (h->gendisk[i] == disk) {
2087 h->gendisk[i] = NULL;
2094 set_capacity(disk, 0);
2098 /* zero out the disk size info */
2100 drv->block_size = 0;
2104 drv->raid_level = -1; /* This can be used as a flag variable to
2105 * indicate that this element of the drive
2110 /* check to see if it was the last disk */
2111 if (drv == h->drv + h->highest_lun) {
2112 /* if so, find the new hightest lun */
2113 int i, newhighest = -1;
2114 for (i = 0; i <= h->highest_lun; i++) {
2115 /* if the disk has size > 0, it is available */
2116 if (h->drv[i].heads)
2119 h->highest_lun = newhighest;
/*
 * fill_cmd() - populate a CommandList_struct for either a SCSI-style
 * command (TYPE_CMD) or a controller message (TYPE_MSG), and DMA-map
 * the single data buffer into SG[0] when size > 0.
 * Returns a status code (IO_OK on success -- return statements elided
 * in this extract, as are the switch(cmd) keyword lines, break
 * statements and some braces).
 */
2127 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
2128 size_t size, __u8 page_code, unsigned char *scsi3addr,
2131 ctlr_info_t *h = hba[ctlr];
2132 u64bit buff_dma_handle;
2135 c->cmd_type = CMD_IOCTL_PEND;
2136 c->Header.ReplyQueue = 0;
/* One SG entry iff a data buffer is supplied (condition elided). */
2138 c->Header.SGList = 1;
2139 c->Header.SGTotal = 1;
2141 c->Header.SGList = 0;
2142 c->Header.SGTotal = 0;
/* Tag the command with its own bus address so the completion
 * interrupt can find it again. */
2144 c->Header.Tag.lower = c->busaddr;
2145 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2147 c->Request.Type.Type = cmd_type;
2148 if (cmd_type == TYPE_CMD) {
/* CISS_INQUIRY case */
2151 /* are we trying to read a vital product page */
2152 if (page_code != 0) {
2153 c->Request.CDB[1] = 0x01;
2154 c->Request.CDB[2] = page_code;
2156 c->Request.CDBLen = 6;
2157 c->Request.Type.Attribute = ATTR_SIMPLE;
2158 c->Request.Type.Direction = XFER_READ;
2159 c->Request.Timeout = 0;
2160 c->Request.CDB[0] = CISS_INQUIRY;
2161 c->Request.CDB[4] = size & 0xFF;
2163 case CISS_REPORT_LOG:
2164 case CISS_REPORT_PHYS:
2165 /* Talking to controller so It's a physical command
2166 mode = 00 target = 0. Nothing to write.
2168 c->Request.CDBLen = 12;
2169 c->Request.Type.Attribute = ATTR_SIMPLE;
2170 c->Request.Type.Direction = XFER_READ;
2171 c->Request.Timeout = 0;
2172 c->Request.CDB[0] = cmd;
/* Allocation length is big-endian in the CDB. */
2173 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
2174 c->Request.CDB[7] = (size >> 16) & 0xFF;
2175 c->Request.CDB[8] = (size >> 8) & 0xFF;
2176 c->Request.CDB[9] = size & 0xFF;
2179 case CCISS_READ_CAPACITY:
2180 c->Request.CDBLen = 10;
2181 c->Request.Type.Attribute = ATTR_SIMPLE;
2182 c->Request.Type.Direction = XFER_READ;
2183 c->Request.Timeout = 0;
2184 c->Request.CDB[0] = cmd;
2186 case CCISS_READ_CAPACITY_16:
2187 c->Request.CDBLen = 16;
2188 c->Request.Type.Attribute = ATTR_SIMPLE;
2189 c->Request.Type.Direction = XFER_READ;
2190 c->Request.Timeout = 0;
2191 c->Request.CDB[0] = cmd;
/* Service action 0x10 = READ CAPACITY(16). */
2192 c->Request.CDB[1] = 0x10;
2193 c->Request.CDB[10] = (size >> 24) & 0xFF;
2194 c->Request.CDB[11] = (size >> 16) & 0xFF;
2195 c->Request.CDB[12] = (size >> 8) & 0xFF;
2196 c->Request.CDB[13] = size & 0xFF;
/* NOTE(review): Timeout and CDB[0] are assigned a second time below --
 * redundant but harmless duplicates of lines 2190-2191. */
2197 c->Request.Timeout = 0;
2198 c->Request.CDB[0] = cmd;
2200 case CCISS_CACHE_FLUSH:
2201 c->Request.CDBLen = 12;
2202 c->Request.Type.Attribute = ATTR_SIMPLE;
2203 c->Request.Type.Direction = XFER_WRITE;
2204 c->Request.Timeout = 0;
2205 c->Request.CDB[0] = BMIC_WRITE;
2206 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2208 case TEST_UNIT_READY:
2209 c->Request.CDBLen = 6;
2210 c->Request.Type.Attribute = ATTR_SIMPLE;
2211 c->Request.Type.Direction = XFER_NONE;
2212 c->Request.Timeout = 0;
/* NOTE(review): "0x%c" prints the opcode as a character; "0x%02x" was
 * probably intended -- confirm before changing the message. */
2216 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
2219 } else if (cmd_type == TYPE_MSG) {
2221 case 0: /* ABORT message */
2222 c->Request.CDBLen = 12;
2223 c->Request.Type.Attribute = ATTR_SIMPLE;
2224 c->Request.Type.Direction = XFER_WRITE;
2225 c->Request.Timeout = 0;
2226 c->Request.CDB[0] = cmd; /* abort */
2227 c->Request.CDB[1] = 0; /* abort a command */
2228 /* buff contains the tag of the command to abort */
2229 memcpy(&c->Request.CDB[4], buff, 8);
2231 case 1: /* RESET message */
2232 c->Request.CDBLen = 16;
2233 c->Request.Type.Attribute = ATTR_SIMPLE;
2234 c->Request.Type.Direction = XFER_NONE;
2235 c->Request.Timeout = 0;
2236 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2237 c->Request.CDB[0] = cmd; /* reset */
2238 c->Request.CDB[1] = 0x03; /* reset a target */
2240 case 3: /* No-Op message */
2241 c->Request.CDBLen = 1;
2242 c->Request.Type.Attribute = ATTR_SIMPLE;
2243 c->Request.Type.Direction = XFER_WRITE;
2244 c->Request.Timeout = 0;
2245 c->Request.CDB[0] = cmd;
2249 "cciss%d: unknown message type %d\n", ctlr, cmd);
2254 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
2257 /* Fill in the scatter gather information */
/* Map the caller's buffer for DMA and store the 64-bit bus address
 * split across the SG entry's lower/upper halves. */
2259 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
2261 PCI_DMA_BIDIRECTIONAL);
2262 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
2263 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
2264 c->SG[0].Len = size;
2265 c->SG[0].Ext = 0; /* we are not chaining */
/*
 * check_target_status() - translate a completed command's SCSI status
 * into IO_OK / IO_ERROR.  (Several case labels, return statements and
 * braces are elided in this extract.)
 */
2270 static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
2272 switch (c->err_info->ScsiStatus) {
2275 case SAM_STAT_CHECK_CONDITION:
/* Low nibble of SenseInfo[2] is the SCSI sense key. */
2276 switch (0xf & c->err_info->SenseInfo[2]) {
2277 case 0: return IO_OK; /* no sense */
2278 case 1: return IO_OK; /* recovered error */
2280 printk(KERN_WARNING "cciss%d: cmd 0x%02x "
2281 "check condition, sense key = 0x%02x\n",
2282 h->ctlr, c->Request.CDB[0],
2283 c->err_info->SenseInfo[2]);
2287 printk(KERN_WARNING "cciss%d: cmd 0x%02x"
2288 "scsi status = 0x%02x\n", h->ctlr,
2289 c->Request.CDB[0], c->err_info->ScsiStatus);
/*
 * process_sendcmd_error() - map a command's CommandStatus into
 * IO_OK, IO_ERROR, or IO_NEEDS_RETRY (only CMD_UNSOLICITED_ABORT is
 * retryable).  break statements and a couple of case labels are elided
 * in this extract.
 */
2295 static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
2297 int return_status = IO_OK;
2299 if (c->err_info->CommandStatus == CMD_SUCCESS)
2302 switch (c->err_info->CommandStatus) {
2303 case CMD_TARGET_STATUS:
/* Defer to the SCSI-status decoder for target-reported errors. */
2304 return_status = check_target_status(h, c);
2306 case CMD_DATA_UNDERRUN:
2307 case CMD_DATA_OVERRUN:
2308 /* expected for inquiry and report lun commands */
2311 printk(KERN_WARNING "cciss: cmd 0x%02x is "
2312 "reported invalid\n", c->Request.CDB[0]);
2313 return_status = IO_ERROR;
2315 case CMD_PROTOCOL_ERR:
2316 printk(KERN_WARNING "cciss: cmd 0x%02x has "
2317 "protocol error \n", c->Request.CDB[0]);
2318 return_status = IO_ERROR;
2320 case CMD_HARDWARE_ERR:
2321 printk(KERN_WARNING "cciss: cmd 0x%02x had "
2322 " hardware error\n", c->Request.CDB[0]);
2323 return_status = IO_ERROR;
2325 case CMD_CONNECTION_LOST:
2326 printk(KERN_WARNING "cciss: cmd 0x%02x had "
2327 "connection lost\n", c->Request.CDB[0]);
2328 return_status = IO_ERROR;
2331 printk(KERN_WARNING "cciss: cmd 0x%02x was "
2332 "aborted\n", c->Request.CDB[0]);
2333 return_status = IO_ERROR;
2335 case CMD_ABORT_FAILED:
2336 printk(KERN_WARNING "cciss: cmd 0x%02x reports "
2337 "abort failed\n", c->Request.CDB[0]);
2338 return_status = IO_ERROR;
2340 case CMD_UNSOLICITED_ABORT:
/* The only condition the caller retries (see sendcmd_withirq_core). */
2342 "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
2344 return_status = IO_NEEDS_RETRY;
2347 printk(KERN_WARNING "cciss: cmd 0x%02x returned "
2348 "unknown status %x\n", c->Request.CDB[0],
2349 c->err_info->CommandStatus);
2350 return_status = IO_ERROR;
2352 return return_status;
/*
 * sendcmd_withirq_core() - queue a prepared command, sleep until the
 * interrupt handler completes it, optionally retry on unsolicited
 * abort, then unmap the command's single DMA buffer.
 * (The 'int attempt_retry' parameter line, addQ/start_io calls and the
 * retry 'goto resend' are elided in this extract.)
 */
2355 static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
2358 DECLARE_COMPLETION_ONSTACK(wait);
2359 u64bit buff_dma_handle;
2360 unsigned long flags;
2361 int return_status = IO_OK;
2365 /* Put the request on the tail of the queue and send it */
2366 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2370 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
/* Blocks until the completion interrupt signals 'wait'. */
2372 wait_for_completion(&wait);
2374 if (c->err_info->CommandStatus == 0 || !attempt_retry)
2377 return_status = process_sendcmd_error(h, c);
2379 if (return_status == IO_NEEDS_RETRY &&
2380 c->retry_count < MAX_CMD_RETRIES) {
2381 printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
2384 /* erase the old error information */
2385 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2386 return_status = IO_OK;
/* Re-arm the on-stack completion before resubmitting. */
2387 INIT_COMPLETION(wait);
2392 /* unlock the buffers from DMA */
2393 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2394 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2395 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
2396 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2397 return return_status;
/*
 * sendcmd_withirq() - convenience wrapper: allocate a command, fill it
 * via fill_cmd(), submit it interrupt-driven with retries enabled, and
 * return the resulting status.  (The '__u8 cmd_type' parameter line,
 * allocation-failure return and cmd_free call are elided here.)
 */
2400 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
2401 __u8 page_code, unsigned char scsi3addr[],
2404 ctlr_info_t *h = hba[ctlr];
2405 CommandList_struct *c;
2408 c = cmd_alloc(h, 0);
2411 return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
2412 scsi3addr, cmd_type);
2413 if (return_status == IO_OK)
/* attempt_retry = 1: allow retry on unsolicited abort. */
2414 return_status = sendcmd_withirq_core(h, c, 1);
2417 return return_status;
/*
 * cciss_geometry_inquiry() - fill drv's heads/sectors/cylinders,
 * block_size, nr_blocks and raid_level from vendor-specific inquiry
 * page 0xC1; falls back to a synthetic geometry when the page is not
 * supported.  (Declarations of return_code/t and some braces are
 * elided in this extract.)
 */
2420 static void cciss_geometry_inquiry(int ctlr, int logvol,
2421 int withirq, sector_t total_size,
2422 unsigned int block_size,
2423 InquiryData_struct *inq_buff,
2424 drive_info_struct *drv)
2428 unsigned char scsi3addr[8];
2430 memset(inq_buff, 0, sizeof(InquiryData_struct));
2431 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
/* 0xC1: HP vendor-specific geometry VPD page. */
2433 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
2434 inq_buff, sizeof(*inq_buff),
2435 0xC1, scsi3addr, TYPE_CMD);
2437 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
2438 sizeof(*inq_buff), 0xC1, scsi3addr,
2440 if (return_code == IO_OK) {
2441 if (inq_buff->data_byte[8] == 0xFF) {
2443 "cciss: reading geometry failed, volume "
2444 "does not support reading geometry\n");
/* Fabricate a geometry when the page is unsupported. */
2446 drv->sectors = 32; // Sectors per track
2447 drv->cylinders = total_size + 1;
2448 drv->raid_level = RAID_UNKNOWN;
2450 drv->heads = inq_buff->data_byte[6];
2451 drv->sectors = inq_buff->data_byte[7];
2452 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
2453 drv->cylinders += inq_buff->data_byte[5];
2454 drv->raid_level = inq_buff->data_byte[8];
2456 drv->block_size = block_size;
2457 drv->nr_blocks = total_size + 1;
/* Recompute cylinders so heads*sectors*cylinders covers the volume. */
2458 t = drv->heads * drv->sectors;
2460 sector_t real_size = total_size + 1;
2461 unsigned long rem = sector_div(real_size, t);
2464 drv->cylinders = real_size;
2466 } else { /* Get geometry failed */
2467 printk(KERN_WARNING "cciss: reading geometry failed\n");
2469 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2470 drv->heads, drv->sectors, drv->cylinders);
/*
 * cciss_read_capacity() - 10-byte READ CAPACITY for a logical volume.
 * On success *total_size holds the last LBA (callers add 1 for the
 * block count) and *block_size the sector size; on failure both fall
 * back to 0 / BLOCK_SIZE.  (Return type line, kfree and some braces
 * are elided in this extract.)
 */
2474 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2475 unsigned int *block_size)
2477 ReadCapdata_struct *buf;
2479 unsigned char scsi3addr[8];
2481 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2483 printk(KERN_WARNING "cciss: out of memory\n");
2487 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
2489 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2490 ctlr, buf, sizeof(ReadCapdata_struct),
2491 0, scsi3addr, TYPE_CMD);
2493 return_code = sendcmd(CCISS_READ_CAPACITY,
2494 ctlr, buf, sizeof(ReadCapdata_struct),
2495 0, scsi3addr, TYPE_CMD);
2496 if (return_code == IO_OK) {
/* READ CAPACITY data is big-endian on the wire. */
2497 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2498 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2499 } else { /* read capacity command failed */
2500 printk(KERN_WARNING "cciss: read capacity failed\n");
2502 *block_size = BLOCK_SIZE;
2504 if (*total_size != 0)
2505 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2506 (unsigned long long)*total_size+1, *block_size);
/*
 * cciss_read_capacity_16: 16-byte READ CAPACITY variant for volumes larger
 * than a 32-bit block count can describe.  Same contract as
 * cciss_read_capacity() but the block count is decoded as a 64-bit
 * big-endian value.
 * NOTE(review): lines elided in this excerpt (return type, kzalloc-failure
 * return, the if/else around withirq, kfree) - see full source.
 */
2511 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2513 ReadCapdata_struct_16 *buf;
2515 unsigned char scsi3addr[8];
2517 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2519 printk(KERN_WARNING "cciss: out of memory\n");
2523 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
2525 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2526 ctlr, buf, sizeof(ReadCapdata_struct_16),
2527 0, scsi3addr, TYPE_CMD);
2530 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2531 ctlr, buf, sizeof(ReadCapdata_struct_16),
2532 0, scsi3addr, TYPE_CMD);
2534 if (return_code == IO_OK) {
/* 64-bit block count, still big-endian from the firmware */
2535 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2536 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2537 } else { /* read capacity command failed */
2538 printk(KERN_WARNING "cciss: read capacity failed\n");
2540 *block_size = BLOCK_SIZE;
/* +1: total_size is the last addressable block */
2542 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2543 (unsigned long long)*total_size+1, *block_size);
/*
 * cciss_revalidate: block-layer revalidate hook.  Re-reads the volume's
 * capacity (10- or 16-byte form depending on h->cciss_read) and geometry,
 * then refreshes the request queue's logical block size and the gendisk
 * capacity.  Returns 0 on the paths visible here; error returns are in
 * elided lines.
 */
2547 static int cciss_revalidate(struct gendisk *disk)
2549 ctlr_info_t *h = get_host(disk);
2550 drive_info_struct *drv = get_drv(disk);
2553 unsigned int block_size;
2554 sector_t total_size;
2555 InquiryData_struct *inq_buff = NULL;
/* map this gendisk back to its logical-volume index by LunID */
2557 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2558 if (h->drv[logvol].LunID == drv->LunID) {
2567 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2568 if (inq_buff == NULL) {
2569 printk(KERN_WARNING "cciss: out of memory\n");
/* controllers limited to READ(10) cannot address >2TB volumes */
2572 if (h->cciss_read == CCISS_READ_10) {
2573 cciss_read_capacity(h->ctlr, logvol, 1,
2574 &total_size, &block_size);
2576 cciss_read_capacity_16(h->ctlr, logvol, 1,
2577 &total_size, &block_size);
2579 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
/* push refreshed sizes into the block layer */
2582 blk_queue_logical_block_size(drv->queue, drv->block_size);
2583 set_capacity(disk, drv->nr_blocks);
2590 * Wait polling for a command to complete.
2591 * The memory mapped FIFO is polled for the completion.
2592 * Used only at init time, interrupts from the HBA are disabled.
/*
 * Returns the completed command's tag/address, or (per the comment below)
 * an invalid address on timeout.  The exact sentinel value is in an elided
 * line - TODO confirm against full source.
 */
2594 static unsigned long pollcomplete(int ctlr)
2599 /* Wait (up to 20 seconds) for a command to complete */
/* one jiffy sleep per iteration => 20*HZ iterations ~= 20 seconds */
2601 for (i = 20 * HZ; i > 0; i--) {
2602 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2603 if (done == FIFO_EMPTY)
2604 schedule_timeout_uninterruptible(1);
2608 /* Invalid address to tell caller we ran out of time */
/*
 * add_sendcmd_reject: stash a completion that sendcmd_core() polled up but
 * did not expect, so the interrupt handler can process it later via
 * h->scsi_rejects (see get_next_completion()).  Without
 * CONFIG_CISS_SCSI_TAPE any unexpected completion is an error.
 * Returns nonzero on the failure paths visible below; the success return
 * is in an elided line - TODO confirm.
 */
2612 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2614 /* We get in here if sendcmd() is polling for completions
2615 and gets some command back that it wasn't expecting --
2616 something other than that which it just sent down.
2617 Ordinarily, that shouldn't happen, but it can happen when
2618 the scsi tape stuff gets into error handling mode, and
2619 starts using sendcmd() to try to abort commands and
2620 reset tape drives. In that case, sendcmd may pick up
2621 completions of commands that were sent to logical drives
2622 through the block i/o system, or cciss ioctls completing, etc.
2623 In that case, we need to save those completions for later
2624 processing by the interrupt handler.
2627 #ifdef CONFIG_CISS_SCSI_TAPE
2628 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2630 /* If it's not the scsi tape stuff doing error handling, (abort */
2631 /* or reset) then we don't expect anything weird. */
2632 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2634 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2635 "Invalid command list address returned! (%lx)\n",
2637 /* not much we can do. */
2638 #ifdef CONFIG_CISS_SCSI_TAPE
2642 /* We've sent down an abort or reset, but something else
/* +2: allow for an extra abort and reset beyond normal commands */
2644 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2645 /* Uh oh. No room to save it for later... */
2646 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2647 "reject list overflow, command lost!\n", ctlr);
2650 /* Save it for later */
2651 srl->complete[srl->ncompletions] = complete;
2652 srl->ncompletions++;
2657 /* Send command c to controller h and poll for it to complete.
2658 * Turns interrupts off on the board. Used at driver init time
2659 * and during SCSI error recovery.
/*
 * Returns an IO_* status code.  Flow: disable board interrupts, wait for
 * FIFO space, submit, then poll completions; completions that belong to
 * other commands are parked via add_sendcmd_reject() and handed to
 * do_cciss_intr() at the end.  Retries CMD_UNSOLICITED_ABORT up to
 * MAX_CMD_RETRIES.  Several loop-back/goto lines are elided in this
 * excerpt - the retry/resubmit plumbing is not fully visible.
 */
2661 static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
2664 unsigned long complete;
2665 int status = IO_ERROR;
2666 u64bit buff_dma_handle;
2670 /* Disable interrupt on the board. */
2671 h->access.set_intr_mask(h, CCISS_INTR_OFF);
2673 /* Make sure there is room in the command FIFO */
2674 /* Actually it should be completely empty at this time */
2675 /* unless we are in here doing error handling for the scsi */
2676 /* tape side of the driver. */
2677 for (i = 200000; i > 0; i--) {
2678 /* if fifo isn't full go */
2679 if (!(h->access.fifo_full(h)))
2682 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2683 " waiting!\n", h->ctlr);
2685 h->access.submit_command(h, c); /* Send the cmd */
2687 complete = pollcomplete(h->ctlr);
2690 printk(KERN_DEBUG "cciss: command completed\n");
2691 #endif /* CCISS_DEBUG */
/* 1 is pollcomplete()'s timeout sentinel (see its comment) */
2693 if (complete == 1) {
2695 "cciss cciss%d: SendCmd Timeout out, "
2696 "No command list address returned!\n", h->ctlr);
2701 /* If it's not the cmd we're looking for, save it for later */
/* low bit of the tag is the controller's error flag; mask before compare */
2702 if ((complete & ~CISS_ERROR_BIT) != c->busaddr) {
2703 if (add_sendcmd_reject(c->Request.CDB[0],
2704 h->ctlr, complete) != 0)
2705 BUG(); /* we are hosed if we get here. */
2709 /* It is our command. If no error, we're done. */
2710 if (!(complete & CISS_ERROR_BIT)) {
2715 /* There is an error... */
2717 /* if data overrun or underun on Report command ignore it */
2718 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2719 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2720 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2721 ((c->err_info->CommandStatus == CMD_DATA_OVERRUN) ||
2722 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) {
2723 complete = c->busaddr;
2728 if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) {
2729 printk(KERN_WARNING "cciss%d: unsolicited abort %p\n",
2731 if (c->retry_count < MAX_CMD_RETRIES) {
2732 printk(KERN_WARNING "cciss%d: retrying %p\n",
2735 /* erase the old error information */
/* NOTE(review): sizeof(c->err_info) is the POINTER size here, not
 * sizeof(ErrorInfo_struct) as used in resend_cciss_cmd() - looks like
 * only the first 4/8 bytes are cleared; verify against full source. */
2736 memset(c->err_info, 0, sizeof(c->err_info));
2739 printk(KERN_WARNING "cciss%d: retried %p too many "
2740 "times\n", h->ctlr, c);
2745 if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2746 printk(KERN_WARNING "cciss%d: command could not be "
2747 "aborted.\n", h->ctlr);
2752 if (c->err_info->CommandStatus == CMD_TARGET_STATUS) {
2753 status = check_target_status(h, c);
2757 printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr);
2758 printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n",
2759 c->Request.CDB[0], c->err_info->CommandStatus);
2765 /* unlock the data buffer from DMA */
2766 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2767 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2768 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
2769 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2770 #ifdef CONFIG_CISS_SCSI_TAPE
2771 /* if we saved some commands for later, process them now. */
2772 if (h->scsi_rejects.ncompletions > 0)
2773 do_cciss_intr(0, h);
2779 * Send a command to the controller, and wait for it to complete.
2780 * Used at init time, and during SCSI error recovery.
/*
 * Wrapper: allocate a command block (with error buffer), fill it in via
 * fill_cmd(), run it synchronously through sendcmd_core(), free it, and
 * return the IO_* status.  The failure return after cmd_alloc() is in an
 * elided line.
 */
2782 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
2783 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2785 CommandList_struct *c;
/* second arg 1: allocate from the command pool (with err_info buffer) */
2788 c = cmd_alloc(hba[ctlr], 1);
2790 printk(KERN_WARNING "cciss: unable to get memory");
2793 status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
2794 scsi3addr, cmd_type);
2795 if (status == IO_OK)
2796 status = sendcmd_core(hba[ctlr], c);
2797 cmd_free(hba[ctlr], c, 1);
2802 * Map (physical) PCI mem into (virtual) kernel space
2804 static void __iomem *remap_pci_mem(ulong base, ulong size)
2806 ulong page_base = ((ulong) base) & PAGE_MASK;
2807 ulong page_offs = ((ulong) base) - page_base;
2808 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2810 return page_remapped ? (page_remapped + page_offs) : NULL;
2814 * Takes jobs of the Q and sends them to the hardware, then puts it on
2815 * the Q to wait for completion.
/*
 * Drains h->reqQ into the controller FIFO.  Stops early (break elided)
 * when the FIFO fills; completed-queue insertion and reqQ removal lines
 * are also elided in this excerpt.  Caller is expected to hold the
 * controller lock - TODO confirm against full source.
 */
2817 static void start_io(ctlr_info_t *h)
2819 CommandList_struct *c;
2821 while (!hlist_empty(&h->reqQ)) {
/* FIFO order: always take the head of the request queue */
2822 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
2823 /* can't do anything if fifo is full */
2824 if ((h->access.fifo_full(h))) {
2825 printk(KERN_WARNING "cciss: fifo full\n");
2829 /* Get the first entry from the Request Q */
2833 /* Tell the controller execute command */
2834 h->access.submit_command(h, c);
2836 /* Put job onto the completed Q */
2841 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2842 /* Zeros out the error record and then resends the command back */
2843 /* to the controller */
2844 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2846 /* erase the old error information */
2847 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2849 /* add it to software queue and then send it to the controller */
/* requeue bookkeeping (addQ/Qdepth++) is in elided lines here */
2852 if (h->Qdepth > h->maxQsinceinit)
/* track high-water mark of the software queue for diagnostics */
2853 h->maxQsinceinit = h->Qdepth;
/*
 * make_status_bytes - pack the four SCSI result components into one
 * 32-bit status word (the inverse of the extraction macros in scsi.h):
 * bits 0-7 SCSI status, 8-15 message byte, 16-23 host byte,
 * 24-31 driver byte.  Each input is masked to its low 8 bits.
 */
static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
	unsigned int msg_byte, unsigned int host_byte,
	unsigned int driver_byte)
{
	unsigned int result = scsi_status_byte & 0xff;

	result |= (msg_byte & 0xff) << 8;
	result |= (host_byte & 0xff) << 16;
	result |= (driver_byte & 0xff) << 24;
	return result;
}
/*
 * evaluate_target_status: translate a CMD_TARGET_STATUS completion into
 * the block layer's packed error word (see make_status_bytes).  For
 * pass-through (blk_pc) requests the sense data is copied back to the
 * request; for fs requests a unit attention triggers a retry via
 * *retry_cmd.  Some return statements and the host_byte else-branch are
 * elided in this excerpt.
 */
2869 static inline int evaluate_target_status(ctlr_info_t *h,
2870 CommandList_struct *cmd, int *retry_cmd)
2872 unsigned char sense_key;
2873 unsigned char status_byte, msg_byte, host_byte, driver_byte;
2877 /* If we get in here, it means we got "target status", that is, scsi status */
2878 status_byte = cmd->err_info->ScsiStatus;
2879 driver_byte = DRIVER_OK;
2880 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
/* pass-throughs report DID_PASSTHROUGH so the midlayer leaves them alone */
2882 if (blk_pc_request(cmd->rq))
2883 host_byte = DID_PASSTHROUGH;
2887 error_value = make_status_bytes(status_byte, msg_byte,
2888 host_byte, driver_byte);
2890 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
2891 if (!blk_pc_request(cmd->rq))
2892 printk(KERN_WARNING "cciss: cmd %p "
2893 "has SCSI Status 0x%x\n",
2894 cmd, cmd->err_info->ScsiStatus);
2898 /* check the sense key */
/* sense key lives in the low nibble of sense byte 2 */
2899 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2900 /* no status or recovered error */
2901 if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
/* unit attention: retry fs requests, surface it to pass-throughs */
2904 if (check_for_unit_attention(h, cmd)) {
2905 *retry_cmd = !blk_pc_request(cmd->rq);
2909 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2910 if (error_value != 0)
2911 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2912 " sense key = 0x%x\n", cmd, sense_key);
2916 /* SG_IO or similar, copy sense data back */
2917 if (cmd->rq->sense) {
/* never copy more than the caller's sense buffer can hold */
2918 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2919 cmd->rq->sense_len = cmd->err_info->SenseLen;
2920 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2921 cmd->rq->sense_len);
2923 cmd->rq->sense_len = 0;
2928 /* checks the status of the job and calls complete buffers to mark all
2929 * buffers for the completed job. Note that this function does not need
2930 * to hold the hba/queue lock.
/*
 * complete_command: post-process a finished CMD_RWREQ.  Maps the
 * controller's CommandStatus into rq->errors via make_status_bytes(),
 * retries unsolicited aborts (up to MAX_CMD_RETRIES) by resending, and
 * finally hands the request to blk_complete_request().  The switch's
 * break statements and several case labels/paths are elided in this
 * excerpt.
 */
2932 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2936 struct request *rq = cmd->rq;
/* (timeout parameter handling - the 'timeout' flag sets DRIVER_TIMEOUT) */
2941 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
2943 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2944 goto after_error_processing;
2946 switch (cmd->err_info->CommandStatus) {
2947 case CMD_TARGET_STATUS:
2948 rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
2950 case CMD_DATA_UNDERRUN:
/* underrun is only meaningful (and reported) for fs requests */
2951 if (blk_fs_request(cmd->rq)) {
2952 printk(KERN_WARNING "cciss: cmd %p has"
2953 " completed with data underrun "
2955 cmd->rq->resid_len = cmd->err_info->ResidualCnt;
2958 case CMD_DATA_OVERRUN:
2959 if (blk_fs_request(cmd->rq))
2960 printk(KERN_WARNING "cciss: cmd %p has"
2961 " completed with data overrun "
2965 printk(KERN_WARNING "cciss: cmd %p is "
2966 "reported invalid\n", cmd);
/* pass-throughs get DID_PASSTHROUGH, everything else DID_ERROR */
2967 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2968 cmd->err_info->CommandStatus, DRIVER_OK,
2969 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2971 case CMD_PROTOCOL_ERR:
2972 printk(KERN_WARNING "cciss: cmd %p has "
2973 "protocol error \n", cmd);
2974 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2975 cmd->err_info->CommandStatus, DRIVER_OK,
2976 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2978 case CMD_HARDWARE_ERR:
2979 printk(KERN_WARNING "cciss: cmd %p had "
2980 " hardware error\n", cmd);
2981 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2982 cmd->err_info->CommandStatus, DRIVER_OK,
2983 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2985 case CMD_CONNECTION_LOST:
2986 printk(KERN_WARNING "cciss: cmd %p had "
2987 "connection lost\n", cmd);
2988 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2989 cmd->err_info->CommandStatus, DRIVER_OK,
2990 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2993 printk(KERN_WARNING "cciss: cmd %p was "
2995 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2996 cmd->err_info->CommandStatus, DRIVER_OK,
2997 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2999 case CMD_ABORT_FAILED:
3000 printk(KERN_WARNING "cciss: cmd %p reports "
3001 "abort failed\n", cmd);
3002 rq->errors = make_status_bytes(SAM_STAT_GOOD,
3003 cmd->err_info->CommandStatus, DRIVER_OK,
3004 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
3006 case CMD_UNSOLICITED_ABORT:
3007 printk(KERN_WARNING "cciss%d: unsolicited "
3008 "abort %p\n", h->ctlr, cmd);
/* retry the command rather than failing the request, if budget remains */
3009 if (cmd->retry_count < MAX_CMD_RETRIES) {
3012 "cciss%d: retrying %p\n", h->ctlr, cmd);
3016 "cciss%d: %p retried too "
3017 "many times\n", h->ctlr, cmd);
3018 rq->errors = make_status_bytes(SAM_STAT_GOOD,
3019 cmd->err_info->CommandStatus, DRIVER_OK,
3020 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
3023 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
3024 rq->errors = make_status_bytes(SAM_STAT_GOOD,
3025 cmd->err_info->CommandStatus, DRIVER_OK,
3026 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
3029 printk(KERN_WARNING "cciss: cmd %p returned "
3030 "unknown status %x\n", cmd,
3031 cmd->err_info->CommandStatus);
3032 rq->errors = make_status_bytes(SAM_STAT_GOOD,
3033 cmd->err_info->CommandStatus, DRIVER_OK,
3034 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
3037 after_error_processing:
3039 /* We need to return this command */
3041 resend_cciss_cmd(h, cmd);
/* stash the command so the softirq completion can find it */
3044 cmd->rq->completion_data = cmd;
3045 blk_complete_request(cmd->rq);
3049 * Get a request and submit it to the controller.
/*
 * do_cciss_request: block-layer request_fn.  Pulls requests off the
 * queue, builds a CISS READ/WRITE (10- or 16-byte CDB) or pass-through
 * command, DMA-maps the scatterlist, queues the command, and kicks
 * start_io().  Called with q->queue_lock held; the lock is dropped while
 * building the command and retaken before queueing.  The outer loop,
 * goto labels and the start_io() call are in elided lines.
 */
3051 static void do_cciss_request(struct request_queue *q)
3053 ctlr_info_t *h = q->queuedata;
3054 CommandList_struct *c;
3057 struct request *creq;
3059 struct scatterlist tmp_sg[MAXSGENTRIES];
3060 drive_info_struct *drv;
3063 /* We call start_io here in case there is a command waiting on the
3064 * queue that has not been sent.
/* don't issue while the block layer still has the queue plugged */
3066 if (blk_queue_plugged(q))
3070 creq = blk_peek_request(q);
3074 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
3076 if ((c = cmd_alloc(h, 1)) == NULL)
3079 blk_start_request(creq);
/* drop the queue lock while we build and DMA-map the command */
3081 spin_unlock_irq(q->queue_lock);
3083 c->cmd_type = CMD_RWREQ;
3086 /* fill in the request */
3087 drv = creq->rq_disk->private_data;
3088 c->Header.ReplyQueue = 0; // unused in simple mode
3089 /* got command from pool, so use the command block index instead */
3090 /* for direct lookups. */
3091 /* The first 2 bits are reserved for controller error reporting. */
3092 c->Header.Tag.lower = (c->cmdindex << 3);
3093 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
3094 c->Header.LUN.LogDev.VolId = drv->LunID;
3095 c->Header.LUN.LogDev.Mode = 1;
3096 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
3097 c->Request.Type.Type = TYPE_CMD; // It is a command.
3098 c->Request.Type.Attribute = ATTR_SIMPLE;
3099 c->Request.Type.Direction =
3100 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
3101 c->Request.Timeout = 0; // Don't time out
/* opcode selection (CDB[0] assignment line elided): READ vs WRITE */
3103 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
3104 start_blk = blk_rq_pos(creq);
3106 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
3107 (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
3108 #endif /* CCISS_DEBUG */
3110 sg_init_table(tmp_sg, MAXSGENTRIES);
3111 seg = blk_rq_map_sg(q, creq, tmp_sg);
3113 /* get the DMA records for the setup */
3114 if (c->Request.Type.Direction == XFER_READ)
3115 dir = PCI_DMA_FROMDEVICE;
3117 dir = PCI_DMA_TODEVICE;
/* copy each SG element's DMA address/length into the command's SG list */
3119 for (i = 0; i < seg; i++) {
3120 c->SG[i].Len = tmp_sg[i].length;
3121 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
3123 tmp_sg[i].length, dir);
3124 c->SG[i].Addr.lower = temp64.val32.lower;
3125 c->SG[i].Addr.upper = temp64.val32.upper;
3126 c->SG[i].Ext = 0; // we are not chaining
3128 /* track how many SG entries we are using */
3133 printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
3134 blk_rq_sectors(creq), seg);
3135 #endif /* CCISS_DEBUG */
3137 c->Header.SGList = c->Header.SGTotal = seg;
3138 if (likely(blk_fs_request(creq))) {
/* 10-byte CDB: 32-bit LBA (CDB[2..5]), 16-bit count (CDB[7..8]) */
3139 if(h->cciss_read == CCISS_READ_10) {
3140 c->Request.CDB[1] = 0;
3141 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
3142 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
3143 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
3144 c->Request.CDB[5] = start_blk & 0xff;
3145 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
3146 c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
3147 c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
3148 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
/* 16-byte CDB: 64-bit LBA (CDB[2..9]), 32-bit count (CDB[10..13]) */
3150 u32 upper32 = upper_32_bits(start_blk);
3152 c->Request.CDBLen = 16;
3153 c->Request.CDB[1]= 0;
3154 c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
3155 c->Request.CDB[3]= (upper32 >> 16) & 0xff;
3156 c->Request.CDB[4]= (upper32 >> 8) & 0xff;
3157 c->Request.CDB[5]= upper32 & 0xff;
3158 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
3159 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
3160 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
3161 c->Request.CDB[9]= start_blk & 0xff;
3162 c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
3163 c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
3164 c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
3165 c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
3166 c->Request.CDB[14] = c->Request.CDB[15] = 0;
/* pass-through (SG_IO etc.): use the caller-supplied CDB verbatim */
3168 } else if (blk_pc_request(creq)) {
3169 c->Request.CDBLen = creq->cmd_len;
3170 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
3172 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
/* re-acquire the queue lock before touching the software queue */
3176 spin_lock_irq(q->queue_lock);
3180 if (h->Qdepth > h->maxQsinceinit)
3181 h->maxQsinceinit = h->Qdepth;
3187 /* We will already have the driver lock here so not need
/*
 * get_next_completion: fetch the next completed-command tag.  With SCSI
 * tape support built in, completions that sendcmd_core() parked in
 * h->scsi_rejects are drained first (LIFO) before reading the hardware
 * FIFO; otherwise it reads the hardware directly.
 */
3193 static inline unsigned long get_next_completion(ctlr_info_t *h)
3195 #ifdef CONFIG_CISS_SCSI_TAPE
3196 /* Any rejects from sendcmd() lying around? Process them first */
3197 if (h->scsi_rejects.ncompletions == 0)
3198 return h->access.command_completed(h);
3200 struct sendcmd_reject_list *srl;
3202 srl = &h->scsi_rejects;
/* pop the most recently saved reject */
3203 n = --srl->ncompletions;
3204 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
3206 return srl->complete[n];
3209 return h->access.command_completed(h);
/*
 * interrupt_pending: true if the board has an interrupt asserted, or
 * (with SCSI tape support) if saved sendcmd rejects still await
 * processing.  The #else/#endif lines are elided in this excerpt.
 */
3213 static inline int interrupt_pending(ctlr_info_t *h)
3215 #ifdef CONFIG_CISS_SCSI_TAPE
3216 return (h->access.intr_pending(h)
3217 || (h->scsi_rejects.ncompletions > 0));
3219 return h->access.intr_pending(h);
/*
 * interrupt_not_for_us: fast-path check used at the top of the IRQ
 * handler on shared interrupt lines - nonzero when the board shows no
 * pending interrupt (or interrupts are disabled) and, with SCSI tape
 * support, no saved rejects are queued.  #else/#endif lines elided.
 */
3223 static inline long interrupt_not_for_us(ctlr_info_t *h)
3225 #ifdef CONFIG_CISS_SCSI_TAPE
3226 return (((h->access.intr_pending(h) == 0) ||
3227 (h->interrupts_enabled == 0))
3228 && (h->scsi_rejects.ncompletions == 0));
3230 return (((h->access.intr_pending(h) == 0) ||
3231 (h->interrupts_enabled == 0)));
/*
 * do_cciss_intr: interrupt handler.  Under CCISS_LOCK, drains completed
 * command tags and dispatches each command by cmd_type: block requests
 * to complete_command(), synchronous ioctls via complete(), tape
 * commands to complete_scsi_command().  Tags with the direct-lookup bit
 * index straight into h->cmd_pool; otherwise the completion queue is
 * searched by bus address.  Tag-bit tests, removeQ and the IRQ_HANDLED
 * return are in elided lines.
 */
3235 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
3237 ctlr_info_t *h = dev_id;
3238 CommandList_struct *c;
3239 unsigned long flags;
/* on shared IRQ lines, bail out quickly if this isn't our interrupt */
3242 if (interrupt_not_for_us(h))
3245 * If there are completed commands in the completion queue,
3246 * we had better do something about it.
3248 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
3249 while (interrupt_pending(h)) {
3250 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
/* a2 is the decoded command-pool index from the tag */
3254 if (a2 >= h->nr_cmds) {
3256 "cciss: controller cciss%d failed, stopping.\n",
3258 fail_all_cmds(h->ctlr);
3262 c = h->cmd_pool + a2;
3266 struct hlist_node *tmp;
/* slow path: tag was not a direct-lookup index, search cmpQ */
3270 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
3271 if (c->busaddr == a)
3276 * If we've found the command, take it off the
3277 * completion Q and free it
3279 if (c && c->busaddr == a) {
3281 if (c->cmd_type == CMD_RWREQ) {
3282 complete_command(h, c, 0);
3283 } else if (c->cmd_type == CMD_IOCTL_PEND) {
/* wake the task sleeping in the ioctl path */
3284 complete(c->waiting);
3286 # ifdef CONFIG_CISS_SCSI_TAPE
3287 else if (c->cmd_type == CMD_SCSI)
3288 complete_scsi_command(c, 0, a1);
3295 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
/*
 * scan_thread: per-controller kthread.  Sleeps on h->rescan_wait until
 * check_for_unit_attention() signals a LUN-data change, then rebuilds
 * the logical-drive table.  Exits when kthread_should_stop() is set.
 * The surrounding loop construct is in elided lines.
 */
3299 static int scan_thread(void *data)
3301 ctlr_info_t *h = data;
3303 DECLARE_COMPLETION_ONSTACK(wait);
/* publish the completion so the IRQ path can wake us */
3304 h->rescan_wait = &wait;
3307 rc = wait_for_completion_interruptible(&wait);
3308 if (kthread_should_stop())
/* second arg 0: rescan without first-time initialization */
3311 rebuild_lun_table(h, 0);
/*
 * check_for_unit_attention: inspect a command's sense data for a UNIT
 * ATTENTION condition and log/react per the ASC (SenseInfo[12]).  A
 * REPORT_LUNS_CHANGED attention wakes the rescan thread.  Return values
 * are in elided lines - based on the caller in evaluate_target_status(),
 * nonzero appears to mean "unit attention handled"; TODO confirm.
 */
3316 static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
3318 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
3321 switch (c->err_info->SenseInfo[12]) {
3323 printk(KERN_WARNING "cciss%d: a state change "
3324 "detected, command retried\n", h->ctlr);
3328 printk(KERN_WARNING "cciss%d: LUN failure "
3329 "detected, action required\n", h->ctlr);
3332 case REPORT_LUNS_CHANGED:
3333 printk(KERN_WARNING "cciss%d: report LUN data "
3334 "changed\n", h->ctlr);
/* kick scan_thread() to rebuild the LUN table */
3336 complete(h->rescan_wait);
3339 case POWER_OR_RESET:
3340 printk(KERN_WARNING "cciss%d: a power on "
3341 "or device reset detected\n", h->ctlr);
3344 case UNIT_ATTENTION_CLEARED:
3345 printk(KERN_WARNING "cciss%d: unit attention "
3346 "cleared by another initiator\n", h->ctlr);
3350 printk(KERN_WARNING "cciss%d: unknown "
3351 "unit attention detected\n", h->ctlr);
3357 * We cannot read the structure directly, for portability we must use
3359 * This is for debug only.
/*
 * print_cfg_table: dump the controller's memory-mapped config table via
 * readb/readl accessors (the table lives in device memory, hence no
 * direct struct access).  Compiled only under CCISS_DEBUG.
 */
3362 static void print_cfg_table(CfgTable_struct *tb)
3367 printk("Controller Configuration information\n");
3368 printk("------------------------------------\n");
/* 4-byte signature, read byte-wise from device memory */
3369 for (i = 0; i < 4; i++)
3370 temp_name[i] = readb(&(tb->Signature[i]));
3371 temp_name[4] = '\0';
3372 printk(" Signature = %s\n", temp_name);
3373 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
3374 printk(" Transport methods supported = 0x%x\n",
3375 readl(&(tb->TransportSupport)));
3376 printk(" Transport methods active = 0x%x\n",
3377 readl(&(tb->TransportActive)));
3378 printk(" Requested transport Method = 0x%x\n",
3379 readl(&(tb->HostWrite.TransportRequest)));
3380 printk(" Coalesce Interrupt Delay = 0x%x\n",
3381 readl(&(tb->HostWrite.CoalIntDelay)));
3382 printk(" Coalesce Interrupt Count = 0x%x\n",
3383 readl(&(tb->HostWrite.CoalIntCount)));
3384 printk(" Max outstanding commands = 0x%d\n",
3385 readl(&(tb->CmdsOutMax)));
3386 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3387 for (i = 0; i < 16; i++)
3388 temp_name[i] = readb(&(tb->ServerName[i]));
3389 temp_name[16] = '\0';
3390 printk(" Server Name = %s\n", temp_name);
3391 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
3393 #endif /* CCISS_DEBUG */
/*
 * find_PCI_BAR_index: translate a config-space BAR register offset
 * (relative to PCI_BASE_ADDRESS_0) into a pci_resource index.  Walks the
 * resources accumulating register widths: 32-bit memory BARs take one
 * 4-byte slot, 64-bit BARs take two (the extra increment for the 64-bit
 * case is in an elided line).  Error-return lines are also elided.
 */
3395 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3397 int i, offset, mem_type, bar_type;
3398 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3401 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3402 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
/* I/O BARs are always a single 32-bit register */
3403 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3406 mem_type = pci_resource_flags(pdev, i) &
3407 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3409 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3410 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3411 offset += 4; /* 32 bit */
3413 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3416 default: /* reserved in PCI 2.2 */
3418 "Base address is invalid\n");
3423 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3429 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3430 * controllers that are capable. If not, we use IO-APIC mode.
3429 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3430 * controllers that are capable. If not, we use IO-APIC mode.
3433 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
3434 struct pci_dev *pdev, __u32 board_id)
3436 #ifdef CONFIG_PCI_MSI
3438 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
3442 /* Some boards advertise MSI but don't really support it */
3443 if ((board_id == 0x40700E11) ||
3444 (board_id == 0x40800E11) ||
3445 (board_id == 0x40820E11) || (board_id == 0x40830E11))
3446 goto default_int_mode;
3448 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
3449 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
3451 c->intr[0] = cciss_msix_entries[0].vector;
3452 c->intr[1] = cciss_msix_entries[1].vector;
3453 c->intr[2] = cciss_msix_entries[2].vector;
3454 c->intr[3] = cciss_msix_entries[3].vector;
3459 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
3460 "available\n", err);
3461 goto default_int_mode;
3463 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
3465 goto default_int_mode;
3468 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
3469 if (!pci_enable_msi(pdev)) {
3472 printk(KERN_WARNING "cciss: MSI init failed\n");
3476 #endif /* CONFIG_PCI_MSI */
3477 /* if we get here we're going to use the default interrupt mode */
3478 c->intr[SIMPLE_MODE_INT] = pdev->irq;
/*
 * cciss_pci_init: probe-time PCI/controller bring-up.  Sequence:
 *  1) verify the device isn't disabled, enable it, claim regions;
 *  2) derive board_id from subsystem IDs and pick an interrupt mode;
 *  3) map the control registers, wait (<=120s) for firmware-ready;
 *  4) locate and map the config table via the scratchpad BAR index;
 *  5) match board_id against the products[] table (unknown HP boards get
 *     a best-effort fallback), apply per-board quirks (6400 prefetch,
 *     P600 DMA prefetch/refetch disable);
 *  6) request simple transport mode via the doorbell and confirm it took.
 * Returns 0 on success / negative errno on the visible error paths; some
 * returns and intermediate lines are elided in this excerpt.
 */
3482 static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3484 ushort subsystem_vendor_id, subsystem_device_id, command;
3485 __u32 board_id, scratchpad = 0;
3487 __u32 cfg_base_addr;
3488 __u64 cfg_base_addr_index;
3491 /* check to see if controller has been disabled */
3492 /* BEFORE trying to enable it */
3493 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
/* bit 1 of PCI_COMMAND is memory-space enable */
3494 if (!(command & 0x02)) {
3496 "cciss: controller appears to be disabled\n");
3500 err = pci_enable_device(pdev);
3502 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
3506 err = pci_request_regions(pdev, "cciss");
3508 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
3513 subsystem_vendor_id = pdev->subsystem_vendor;
3514 subsystem_device_id = pdev->subsystem_device;
/* board_id = (subsystem device << 16) | subsystem vendor */
3515 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3516 subsystem_vendor_id);
3519 printk("command = %x\n", command);
3520 printk("irq = %x\n", pdev->irq);
3521 printk("board_id = %x\n", board_id);
3522 #endif /* CCISS_DEBUG */
3524 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
3525 * else we use the IO-APIC interrupt assigned to us by system ROM.
3527 cciss_interrupt_mode(c, pdev, board_id);
3529 /* find the memory BAR */
3530 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3531 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
3534 if (i == DEVICE_COUNT_RESOURCE) {
3535 printk(KERN_WARNING "cciss: No memory BAR found\n");
3537 goto err_out_free_res;
3540 c->paddr = pci_resource_start(pdev, i); /* addressing mode bits
3545 printk("address 0 = %lx\n", c->paddr);
3546 #endif /* CCISS_DEBUG */
/* map just the control-register window (0x250 bytes) */
3547 c->vaddr = remap_pci_mem(c->paddr, 0x250);
3549 /* Wait for the board to become ready. (PCI hotplug needs this.)
3550 * We poll for up to 120 secs, once per 100ms. */
3551 for (i = 0; i < 1200; i++) {
3552 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
3553 if (scratchpad == CCISS_FIRMWARE_READY)
3555 set_current_state(TASK_INTERRUPTIBLE);
3556 schedule_timeout(HZ / 10); /* wait 100ms */
3558 if (scratchpad != CCISS_FIRMWARE_READY) {
3559 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
3561 goto err_out_free_res;
3564 /* get the address index number */
3565 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
3566 cfg_base_addr &= (__u32) 0x0000ffff;
3568 printk("cfg base address = %x\n", cfg_base_addr);
3569 #endif /* CCISS_DEBUG */
3570 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3572 printk("cfg base address index = %llx\n",
3573 (unsigned long long)cfg_base_addr_index);
3574 #endif /* CCISS_DEBUG */
3575 if (cfg_base_addr_index == -1) {
3576 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
3578 goto err_out_free_res;
3581 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3583 printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
3584 #endif /* CCISS_DEBUG */
3585 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3586 cfg_base_addr_index) +
3587 cfg_offset, sizeof(CfgTable_struct));
3588 c->board_id = board_id;
3591 print_cfg_table(c->cfgtable);
3592 #endif /* CCISS_DEBUG */
3594 /* Some controllers support Zero Memory Raid (ZMR).
3595 * When configured in ZMR mode the number of supported
3596 * commands drops to 64. So instead of just setting an
3597 * arbitrary value we make the driver a little smarter.
3598 * We read the config table to tell us how many commands
3599 * are supported on the controller then subtract 4 to
3600 * leave a little room for ioctl calls.
3602 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3603 for (i = 0; i < ARRAY_SIZE(products); i++) {
3604 if (board_id == products[i].board_id) {
3605 c->product_name = products[i].product_name;
3606 c->access = *(products[i].access);
3607 c->nr_cmds = c->max_commands - 4;
/* sanity-check the config table signature ("CISS") */
3611 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3612 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3613 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3614 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3615 printk("Does not appear to be a valid CISS config table\n");
3617 goto err_out_free_res;
3619 /* We didn't find the controller in our list. We know the
3620 * signature is valid. If it's an HP device let's try to
3621 * bind to the device and fire it up. Otherwise we bail.
3623 if (i == ARRAY_SIZE(products)) {
3624 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
/* unknown HP board: borrow the last known product's access methods */
3625 c->product_name = products[i-1].product_name;
3626 c->access = *(products[i-1].access);
3627 c->nr_cmds = c->max_commands - 4;
3628 printk(KERN_WARNING "cciss: This is an unknown "
3629 "Smart Array controller.\n"
3630 "cciss: Please update to the latest driver "
3631 "available from www.hp.com.\n");
3633 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3634 " to access the Smart Array controller %08lx\n"
3635 , (unsigned long)board_id);
3637 goto err_out_free_res;
3642 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3644 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3646 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3650 /* Disabling DMA prefetch and refetch for the P600.
3651 * An ASIC bug may result in accesses to invalid memory addresses.
3652 * We've disabled prefetch for some time now. Testing with XEN
3653 * kernels revealed a bug in the refetch if dom0 resides on a P600.
/* 0x3225103C is the P600's board id */
3655 if(board_id == 0x3225103C) {
3658 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3659 dma_prefetch |= 0x8000;
3660 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3661 pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
3663 pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
3667 printk("Trying to put board into Simple mode\n");
3668 #endif /* CCISS_DEBUG */
3669 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3670 /* Update the field, and then ring the doorbell */
3671 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3672 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3674 /* under certain very rare conditions, this can take awhile.
3675 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3676 * as we enter this code.) */
3677 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
/* doorbell bit clears when the controller has consumed the request */
3678 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3680 /* delay and try again */
3681 set_current_state(TASK_INTERRUPTIBLE);
3682 schedule_timeout(10);
3686 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3687 readl(c->vaddr + SA5_DOORBELL));
3688 #endif /* CCISS_DEBUG */
3690 print_cfg_table(c->cfgtable);
3691 #endif /* CCISS_DEBUG */
3693 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3694 printk(KERN_WARNING "cciss: unable to get board into"
3697 goto err_out_free_res;
3703 * Deliberately omit pci_disable_device(): it does something nasty to
3704 * Smart Array controllers that pci_enable_device does not undo
3706 pci_release_regions(pdev);
3710 /* Function to find the first free pointer into our hba[] array
3711 * Returns -1 if no free entries are left.
/*
 * NOTE(review): partial listing -- original lines 3712, 3714-3716,
 * 3718-3720, 3722-3727, 3730-3731 and 3733-3735 (including the
 * success/failure returns and closing braces) are not shown here.
 */
3713 static int alloc_cciss_hba(void)
/* Scan every controller slot looking for a free (NULL) entry. */
3717 for (i = 0; i < MAX_CTLR; i++) {
/* Allocate a zeroed per-controller structure for the free slot. */
3721 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
/* Reached when all MAX_CTLR slots are already occupied. */
3728 printk(KERN_WARNING "cciss: This driver supports a maximum"
3729 " of %d controllers.\n", MAX_CTLR);
/* kzalloc() failure path. */
3732 printk(KERN_ERR "cciss: out of memory.\n");
/*
 * Release the per-controller state for hba[i].
 * NOTE(review): partial listing -- original lines 3737, 3739-3741 and
 * 3744-3746 (presumably a kfree() of p, clearing hba[i], and braces)
 * are not shown.
 */
3736 static void free_hba(int i)
3738 ctlr_info_t *p = hba[i];
/* Drop the reference on every gendisk this controller allocated. */
3742 for (n = 0; n < CISS_MAX_LUN; n++)
3743 put_disk(p->gendisk[n]);
3747 /* Send a message CDB to the firmware. */
/*
 * Builds a single data-less message command (opcode/type in the CDB),
 * posts it to the controller's inbound queue via BAR 0, and polls the
 * reply port for completion.
 * NOTE(review): partial listing -- many original lines (error checks,
 * returns, closing braces) between the numbered lines are not shown;
 * the comments below describe only what is visible.
 */
3748 static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type)
/* These three fields presumably belong to a local struct ("Command",
 * per the sizeof(Command) uses below) whose braces fall on omitted
 * lines -- TODO confirm against the full source. */
3751 CommandListHeader_struct CommandHeader;
3752 RequestBlock_struct Request;
3753 ErrDescriptor_struct ErrorDescriptor;
/* One DMA buffer holds the command followed by its error-info record. */
3755 static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct);
3758 uint32_t paddr32, tag;
3759 void __iomem *vaddr;
/* Map BAR 0 so the post/reply registers can be touched directly. */
3762 vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
3766 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
3767 CCISS commands, so they must be allocated from the lower 4GiB of
3769 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3775 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3781 /* This must fit, because of the 32-bit consistent DMA mask. Also,
3782 although there's no guarantee, we assume that the address is at
3783 least 4-byte aligned (most likely, it's page-aligned). */
/* No scatter-gather list: the message carries only a 16-byte CDB. */
3786 cmd->CommandHeader.ReplyQueue = 0;
3787 cmd->CommandHeader.SGList = 0;
3788 cmd->CommandHeader.SGTotal = 0;
/* The low 32 bits of the buffer address double as the completion tag. */
3789 cmd->CommandHeader.Tag.lower = paddr32;
3790 cmd->CommandHeader.Tag.upper = 0;
3791 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3793 cmd->Request.CDBLen = 16;
3794 cmd->Request.Type.Type = TYPE_MSG;
3795 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3796 cmd->Request.Type.Direction = XFER_NONE;
3797 cmd->Request.Timeout = 0; /* Don't time out */
3798 cmd->Request.CDB[0] = opcode;
3799 cmd->Request.CDB[1] = type;
3800 memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */
/* Error info lives immediately after the command in the same buffer. */
3802 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command);
3803 cmd->ErrorDescriptor.Addr.upper = 0;
3804 cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct);
/* Post the command, then poll the reply port roughly once a second. */
3806 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3808 for (i = 0; i < 10; i++) {
3809 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
/* The low two bits of the reply tag are flag bits; mask them off
 * before comparing against our buffer address. */
3810 if ((tag & ~3) == paddr32)
3812 schedule_timeout_uninterruptible(HZ);
3817 /* we leak the DMA buffer here ... no choice since the controller could
3818 still complete the command. */
3820 printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n",
3825 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3828 printk(KERN_ERR "cciss: controller message %02x:%02x failed\n",
3833 printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n",
/* Convenience wrappers around cciss_message(): per the macro names,
 * message opcode 1 soft-resets the controller and opcode 3 is a no-op
 * used to probe for liveness. */
3838 #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
3839 #define cciss_noop(p) cciss_message(p, 3, 0)
/*
 * After a hard reset, clear any stale MSI/MSI-X enable bits directly in
 * PCI config space so the controller comes back in a known state.
 * NOTE(review): partial listing -- braces and return statements between
 * the numbered lines are not shown.
 */
3841 static __devinit int cciss_reset_msi(struct pci_dev *pdev)
3843 /* the #defines are stolen from drivers/pci/msi.h. */
3844 #define msi_control_reg(base) (base + PCI_MSI_FLAGS)
3845 #define PCI_MSIX_FLAGS_ENABLE (1 << 15)
/* Disable MSI if the capability exists and is currently enabled. */
3850 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
3852 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3853 if (control & PCI_MSI_FLAGS_ENABLE) {
3854 printk(KERN_INFO "cciss: resetting MSI\n");
3855 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE)
/* Same treatment for the MSI-X capability. */
3859 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3861 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3862 if (control & PCI_MSIX_FLAGS_ENABLE) {
3863 printk(KERN_INFO "cciss: resetting MSI-X\n");
3864 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE)
3871 /* This does a hard reset of the controller using PCI power management
/*
 * NOTE(review): partial listing -- error checks, returns and braces on
 * omitted original lines are not shown.
 */
3873 static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
3875 u16 pmcsr, saved_config_space[32];
3878 printk(KERN_INFO "cciss: using PCI PM to reset controller\n");
3880 /* This is very nearly the same thing as
3882 pci_save_state(pci_dev);
3883 pci_set_power_state(pci_dev, PCI_D3hot);
3884 pci_set_power_state(pci_dev, PCI_D0);
3885 pci_restore_state(pci_dev);
3887 but we can't use these nice canned kernel routines on
3888 kexec, because they also check the MSI/MSI-X state in PCI
3889 configuration space and do the wrong thing when it is
3890 set/cleared. Also, the pci_save/restore_state functions
3891 violate the ordering requirements for restoring the
3892 configuration space from the CCISS document (see the
3893 comment below). So we roll our own .... */
/* Save the first 64 bytes of config space as 32 16-bit words. */
3895 for (i = 0; i < 32; i++)
3896 pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
/* Without a PM capability we have no way to power-cycle the device. */
3898 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3900 printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n");
3904 /* Quoting from the Open CISS Specification: "The Power
3905 * Management Control/Status Register (CSR) controls the power
3906 * state of the device. The normal operating state is D0,
3907 * CSR=00h. The software off state is D3, CSR=03h. To reset
3908 * the controller, place the interface device in D3 then to
3909 * D0, this causes a secondary PCI reset which will reset the
3912 /* enter the D3hot power management state */
3913 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3914 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3916 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
/* Give the device ~0.5 s to settle in each power state. */
3918 schedule_timeout_uninterruptible(HZ >> 1);
3920 /* enter the D0 power management state */
3921 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3923 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3925 schedule_timeout_uninterruptible(HZ >> 1);
3927 /* Restore the PCI configuration space. The Open CISS
3928 * Specification says, "Restore the PCI Configuration
3929 * Registers, offsets 00h through 60h. It is important to
3930 * restore the command register, 16-bits at offset 04h,
3931 * last. Do not restore the configuration status register,
3932 * 16-bits at offset 06h." Note that the offset is 2*i. */
3933 for (i = 0; i < 32; i++) {
/* Skip the command (i == 2) and status (i == 3) words for now. */
3934 if (i == 2 || i == 3)
3936 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
/* Restore the command register last, per the ordering rule above. */
3939 pci_write_config_word(pdev, 4, saved_config_space[2]);
3945 * This is it. Find all the controllers and register them. I really hate
3946 * stealing all these major device numbers.
3947 * returns the number of block devices registered.
/*
 * PCI probe callback: optionally hard-resets the board (kexec /
 * reset_devices), then allocates per-controller state, sets up DMA,
 * block-device major, IRQ, command/error pools, reads the firmware
 * version, builds the LUN table and starts the rescan thread.
 * NOTE(review): partial listing -- goto targets, braces and several
 * statements on omitted original lines are not shown.
 */
3949 static int __devinit cciss_init_one(struct pci_dev *pdev,
3950 const struct pci_device_id *ent)
3955 int dac, return_code;
3956 InquiryData_struct *inq_buff = NULL;
3958 if (reset_devices) {
3959 /* Reset the controller with a PCI power-cycle */
3960 if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev))
3963 /* Now try to get the controller to respond to a no-op. Some
3964 devices (notably the HP Smart Array 5i Controller) need
3965 up to 30 seconds to respond. */
3966 for (i=0; i<30; i++) {
3967 if (cciss_noop(pdev) == 0)
3970 schedule_timeout_uninterruptible(HZ);
3973 printk(KERN_ERR "cciss: controller seems dead\n");
/* Grab a free hba[] slot; i indexes this controller from here on. */
3978 i = alloc_cciss_hba();
3982 hba[i]->busy_initializing = 1;
3983 INIT_HLIST_HEAD(&hba[i]->cmpQ);
3984 INIT_HLIST_HEAD(&hba[i]->reqQ);
3986 if (cciss_pci_init(hba[i], pdev) != 0)
3989 sprintf(hba[i]->devname, "cciss%d", i);
3991 hba[i]->pdev = pdev;
3993 if (cciss_create_hba_sysfs_entry(hba[i]))
3996 /* configure PCI DMA stuff */
3997 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
3999 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
4002 printk(KERN_ERR "cciss: no suitable DMA available\n");
4007 * register with the major number, or get a dynamic major number
4008 * by passing 0 as argument. This is done for greater than
4009 * 8 controller support.
4011 if (i < MAX_CTLR_ORIG)
4012 hba[i]->major = COMPAQ_CISS_MAJOR + i;
4013 rc = register_blkdev(hba[i]->major, hba[i]->devname);
4014 if (rc == -EBUSY || rc == -EINVAL) {
4016 "cciss: Unable to get major number %d for %s "
4017 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
/* Controllers past the legacy range use a dynamic major
 * (presumably assigned from rc) -- omitted lines apply it. */
4020 if (i >= MAX_CTLR_ORIG)
4024 /* make sure the board interrupts are off */
4025 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
4026 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
4027 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
4028 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
4029 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
4033 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
4034 hba[i]->devname, pdev->device, pci_name(pdev),
4035 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
/* Allocate the command-slot bitmap plus DMA-coherent pools for
 * command blocks and their matching error-info records. */
4037 hba[i]->cmd_pool_bits =
4038 kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
4039 * sizeof(unsigned long), GFP_KERNEL);
4040 hba[i]->cmd_pool = (CommandList_struct *)
4041 pci_alloc_consistent(hba[i]->pdev,
4042 hba[i]->nr_cmds * sizeof(CommandList_struct),
4043 &(hba[i]->cmd_pool_dhandle));
4044 hba[i]->errinfo_pool = (ErrorInfo_struct *)
4045 pci_alloc_consistent(hba[i]->pdev,
4046 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
4047 &(hba[i]->errinfo_pool_dhandle));
4048 if ((hba[i]->cmd_pool_bits == NULL)
4049 || (hba[i]->cmd_pool == NULL)
4050 || (hba[i]->errinfo_pool == NULL)) {
4051 printk(KERN_ERR "cciss: out of memory");
4054 #ifdef CONFIG_CISS_SCSI_TAPE
/* Completion slots for rejected SCSI commands; +5 is slack. */
4055 hba[i]->scsi_rejects.complete =
4056 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
4057 (hba[i]->nr_cmds + 5), GFP_KERNEL);
4058 if (hba[i]->scsi_rejects.complete == NULL) {
4059 printk(KERN_ERR "cciss: out of memory");
4063 spin_lock_init(&hba[i]->lock);
4065 /* Initialize the pdev driver private data.
4066 have it point to hba[i]. */
4067 pci_set_drvdata(pdev, hba[i]);
4068 /* command and error info recs zeroed out before
4070 memset(hba[i]->cmd_pool_bits, 0,
4071 DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
4072 * sizeof(unsigned long));
/* Mark every logical-drive slot empty until rebuild_lun_table(). */
4074 hba[i]->num_luns = 0;
4075 hba[i]->highest_lun = -1;
4076 for (j = 0; j < CISS_MAX_LUN; j++) {
4077 hba[i]->drv[j].raid_level = -1;
4078 hba[i]->drv[j].queue = NULL;
4079 hba[i]->gendisk[j] = NULL;
4082 cciss_scsi_setup(i);
4084 /* Turn the interrupts on so we can service requests */
4085 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
4087 /* Get the firmware version */
4088 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
4089 if (inq_buff == NULL) {
4090 printk(KERN_ERR "cciss: out of memory\n");
4094 return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
4095 sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
4096 if (return_code == IO_OK) {
/* Firmware revision is bytes 32-35 of the inquiry data. */
4097 hba[i]->firm_ver[0] = inq_buff->data_byte[32];
4098 hba[i]->firm_ver[1] = inq_buff->data_byte[33];
4099 hba[i]->firm_ver[2] = inq_buff->data_byte[34];
4100 hba[i]->firm_ver[3] = inq_buff->data_byte[35];
4101 } else { /* send command failed */
4102 printk(KERN_WARNING "cciss: unable to determine firmware"
4103 " version of controller\n");
4108 hba[i]->cciss_max_sectors = 2048;
4110 hba[i]->busy_initializing = 0;
4112 rebuild_lun_table(hba[i], 1);
4113 hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i],
4114 "cciss_scan%02d", i);
/* NOTE(review): returning PTR_ERR here bypasses the clean* unwind
 * below, leaving IRQ, pools and registrations allocated -- confirm
 * this early return is intentional. */
4115 if (IS_ERR(hba[i]->cciss_scan_thread))
4116 return PTR_ERR(hba[i]->cciss_scan_thread);
/* Error unwind: labels on omitted lines release resources in the
 * reverse order of their acquisition above. */
4122 #ifdef CONFIG_CISS_SCSI_TAPE
4123 kfree(hba[i]->scsi_rejects.complete);
4125 kfree(hba[i]->cmd_pool_bits);
4126 if (hba[i]->cmd_pool)
4127 pci_free_consistent(hba[i]->pdev,
4128 hba[i]->nr_cmds * sizeof(CommandList_struct),
4129 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
4130 if (hba[i]->errinfo_pool)
4131 pci_free_consistent(hba[i]->pdev,
4132 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
4133 hba[i]->errinfo_pool,
4134 hba[i]->errinfo_pool_dhandle);
4135 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
4137 unregister_blkdev(hba[i]->major, hba[i]->devname);
4139 cciss_destroy_hba_sysfs_entry(hba[i]);
4141 hba[i]->busy_initializing = 0;
4142 /* cleanup any queues that may have been initialized */
4143 for (j=0; j <= hba[i]->highest_lun; j++){
4144 drive_info_struct *drv = &(hba[i]->drv[j]);
4146 blk_cleanup_queue(drv->queue);
4149 * Deliberately omit pci_disable_device(): it does something nasty to
4150 * Smart Array controllers that pci_enable_device does not undo
4152 pci_release_regions(pdev);
4153 pci_set_drvdata(pdev, NULL);
/*
 * PCI shutdown hook: flush the controller's battery-backed write cache
 * to disk and release its interrupt line.
 * NOTE(review): partial listing -- the declarations of flush_buf,
 * return_code and i, and several braces/returns, fall on omitted lines.
 */
4158 static void cciss_shutdown(struct pci_dev *pdev)
4160 ctlr_info_t *tmp_ptr;
/* Nothing to do if probe never attached (or remove already ran). */
4165 tmp_ptr = pci_get_drvdata(pdev);
4166 if (tmp_ptr == NULL)
4172 /* Turn board interrupts off and send the flush cache command */
4173 /* sendcmd will turn off interrupt, and send the flush...
4174 * To write all data in the battery backed cache to disks */
4175 memset(flush_buf, 0, 4);
4176 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0,
4177 CTLR_LUNID, TYPE_CMD);
4178 if (return_code == IO_OK) {
4179 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
4181 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
/* Release the controller's IRQ line after the flush completes. */
4183 free_irq(hba[i]->intr[2], hba[i]);
/*
 * PCI remove callback: tear down everything cciss_init_one() built, in
 * roughly reverse order -- scan thread, /proc entry, block major,
 * gendisks/queues, SCSI hook, cache flush + IRQ, MSI state, mappings,
 * DMA pools, PCI regions and sysfs entry.
 * NOTE(review): partial listing -- braces, returns and the assignment
 * of i from tmp_ptr fall on omitted original lines.
 */
4186 static void __devexit cciss_remove_one(struct pci_dev *pdev)
4188 ctlr_info_t *tmp_ptr;
4191 if (pci_get_drvdata(pdev) == NULL) {
4192 printk(KERN_ERR "cciss: Unable to remove device \n");
4196 tmp_ptr = pci_get_drvdata(pdev);
4198 if (hba[i] == NULL) {
4199 printk(KERN_ERR "cciss: device appears to "
4200 "already be removed \n");
/* Stop the background rescan thread before dismantling state. */
4204 kthread_stop(hba[i]->cciss_scan_thread);
4206 remove_proc_entry(hba[i]->devname, proc_cciss);
4207 unregister_blkdev(hba[i]->major, hba[i]->devname);
4209 /* remove it from the disk list */
4210 for (j = 0; j < CISS_MAX_LUN; j++) {
4211 struct gendisk *disk = hba[i]->gendisk[j];
4213 struct request_queue *q = disk->queue;
4215 if (disk->flags & GENHD_FL_UP)
4218 blk_cleanup_queue(q);
4222 #ifdef CONFIG_CISS_SCSI_TAPE
4223 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
/* Flush the write cache and free the IRQ via the shutdown path. */
4226 cciss_shutdown(pdev);
4228 #ifdef CONFIG_PCI_MSI
4229 if (hba[i]->msix_vector)
4230 pci_disable_msix(hba[i]->pdev);
4231 else if (hba[i]->msi_vector)
4232 pci_disable_msi(hba[i]->pdev);
4233 #endif /* CONFIG_PCI_MSI */
4235 iounmap(hba[i]->vaddr);
4237 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
4238 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
4239 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
4240 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
4241 kfree(hba[i]->cmd_pool_bits);
4242 #ifdef CONFIG_CISS_SCSI_TAPE
4243 kfree(hba[i]->scsi_rejects.complete);
4246 * Deliberately omit pci_disable_device(): it does something nasty to
4247 * Smart Array controllers that pci_enable_device does not undo
4249 pci_release_regions(pdev);
4250 pci_set_drvdata(pdev, NULL);
4251 cciss_destroy_hba_sysfs_entry(hba[i]);
/* PCI driver glue binding our callbacks to matching devices.
 * NOTE(review): partial listing -- the .name initializer (original
 * line 4256, presumably) and the closing brace are not shown. */
4255 static struct pci_driver cciss_pci_driver = {
4257 .probe = cciss_init_one,
4258 .remove = __devexit_p(cciss_remove_one),
4259 .id_table = cciss_pci_device_id, /* id_table */
4260 .shutdown = cciss_shutdown,
4264 * This is it. Register the PCI driver information for the cards we control
4265 * the OS will call our registered routines when it finds one of our cards.
/*
 * NOTE(review): partial listing -- braces, the err declaration and the
 * return statements fall on omitted original lines.
 */
4267 static int __init cciss_init(void)
4272 * The hardware requires that commands are aligned on a 64-bit
4273 * boundary. Given that we use pci_alloc_consistent() to allocate an
4274 * array of them, the size must be a multiple of 8 bytes.
4276 BUILD_BUG_ON(sizeof(CommandList_struct) % 8);
4278 printk(KERN_INFO DRIVER_NAME "\n");
/* The cciss bus type must exist before any device can attach to it. */
4280 err = bus_register(&cciss_bus_type);
4284 /* Register for our PCI devices */
4285 err = pci_register_driver(&cciss_pci_driver);
4287 goto err_bus_register;
/* Unwind path: undo bus_register() if driver registration failed. */
4292 bus_unregister(&cciss_bus_type);
/*
 * Module exit: unregister the PCI driver, then force-remove any
 * controllers whose hba[] slot is unexpectedly still populated, and
 * finally drop the /proc entry and bus type.
 * NOTE(review): partial listing -- the i declaration and braces fall
 * on omitted original lines.
 */
4296 static void __exit cciss_cleanup(void)
4300 pci_unregister_driver(&cciss_pci_driver);
4301 /* double check that all controller entrys have been removed */
4302 for (i = 0; i < MAX_CTLR; i++) {
4303 if (hba[i] != NULL) {
4304 printk(KERN_WARNING "cciss: had to remove"
4305 " controller %d\n", i);
4306 cciss_remove_one(hba[i]->pdev);
4309 remove_proc_entry("driver/cciss", NULL);
4310 bus_unregister(&cciss_bus_type);
/*
 * Last-resort path when the board stops responding: mark it dead,
 * disable the device, and complete every queued command with a
 * hardware-error status so waiters are not stuck forever.
 * NOTE(review): partial listing -- the removeQ()/addQ() calls between
 * the visible hlist operations fall on omitted original lines.
 */
4313 static void fail_all_cmds(unsigned long ctlr)
4315 /* If we get here, the board is apparently dead. */
4316 ctlr_info_t *h = hba[ctlr];
4317 CommandList_struct *c;
4318 unsigned long flags;
4320 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
4321 h->alive = 0; /* the controller apparently died... */
/* Hold the controller lock across the whole queue sweep. */
4323 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
4325 pci_disable_device(h->pdev); /* Make sure it is really dead. */
4327 /* move everything off the request queue onto the completed queue */
4328 while (!hlist_empty(&h->reqQ)) {
4329 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
4335 /* Now, fail everything on the completed queue with a HW error */
4336 while (!hlist_empty(&h->cmpQ)) {
4337 c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
4339 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
/* Dispatch the completion by command origin: block request,
 * ioctl waiter, or (if configured) SCSI command. */
4340 if (c->cmd_type == CMD_RWREQ) {
4341 complete_command(h, c, 0);
4342 } else if (c->cmd_type == CMD_IOCTL_PEND)
4343 complete(c->waiting);
4344 #ifdef CONFIG_CISS_SCSI_TAPE
4345 else if (c->cmd_type == CMD_SCSI)
4346 complete_scsi_command(c, 0, 0);
4349 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
/* Standard module entry/exit hooks. */
4353 module_init(cciss_init);
4354 module_exit(cciss_cleanup);