2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
22 #include <linux/module.h>
23 #include <linux/interrupt.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/delay.h>
30 #include <linux/timer.h>
31 #include <linux/seq_file.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compat.h>
36 #include <linux/blktrace_api.h>
37 #include <linux/uaccess.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/completion.h>
41 #include <linux/moduleparam.h>
42 #include <scsi/scsi.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_device.h>
45 #include <scsi/scsi_host.h>
46 #include <linux/cciss_ioctl.h>
47 #include <linux/string.h>
48 #include <linux/bitmap.h>
49 #include <asm/atomic.h>
50 #include <linux/kthread.h>
54 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
55 #define HPSA_DRIVER_VERSION "1.0.0"
56 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
58 /* How long to wait (in milliseconds) for board to go into simple mode */
59 #define MAX_CONFIG_WAIT 30000
60 #define MAX_IOCTL_CONFIG_WAIT 1000
62 /* define how many times we will try a command because of bus resets */
63 #define MAX_CMD_RETRIES 3
65 /* Embedded module documentation macros - see modules.h */
66 MODULE_AUTHOR("Hewlett-Packard Company");
67 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
68 HPSA_DRIVER_VERSION);
69 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
70 MODULE_VERSION(HPSA_DRIVER_VERSION);
71 MODULE_LICENSE("GPL");
73 static int hpsa_allow_any;
74 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
75 MODULE_PARM_DESC(hpsa_allow_any,
76 "Allow hpsa driver to access unknown HP Smart Array hardware");
78 /* define the PCI info for the cards we can control */
79 static const struct pci_device_id hpsa_pci_device_id[] = {
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
88 #define PCI_DEVICE_ID_HP_CISSF 0x333f
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F},
90 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
91 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
95 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
97 /* board_id = Subsystem Device ID & Vendor ID
98 * product = Marketing Name for the board
99 * access = Address of the struct of function pointers
101 static struct board_type products[] = {
102 {0x3241103C, "Smart Array P212", &SA5_access},
103 {0x3243103C, "Smart Array P410", &SA5_access},
104 {0x3245103C, "Smart Array P410i", &SA5_access},
105 {0x3247103C, "Smart Array P411", &SA5_access},
106 {0x3249103C, "Smart Array P812", &SA5_access},
107 {0x324a103C, "Smart Array P712m", &SA5_access},
108 {0x324b103C, "Smart Array P711m", &SA5_access},
109 {0x3233103C, "StorageWorks P1210m", &SA5_access},
110 {0x333F103C, "StorageWorks P1210m", &SA5_access},
111 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
114 static int number_of_controllers;
116 static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
117 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
118 static void start_io(struct ctlr_info *h);
121 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
124 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
125 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
126 static struct CommandList *cmd_alloc(struct ctlr_info *h);
127 static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
128 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
129 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
132 static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
133 void (*done)(struct scsi_cmnd *));
135 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
136 static int hpsa_slave_alloc(struct scsi_device *sdev);
137 static void hpsa_slave_destroy(struct scsi_device *sdev);
139 static ssize_t raid_level_show(struct device *dev,
140 struct device_attribute *attr, char *buf);
141 static ssize_t lunid_show(struct device *dev,
142 struct device_attribute *attr, char *buf);
143 static ssize_t unique_id_show(struct device *dev,
144 struct device_attribute *attr, char *buf);
145 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
146 static ssize_t host_store_rescan(struct device *dev,
147 struct device_attribute *attr, const char *buf, size_t count);
148 static int check_for_unit_attention(struct ctlr_info *h,
149 struct CommandList *c);
150 static void check_ioctl_unit_attention(struct ctlr_info *h,
151 struct CommandList *c);
152 /* performant mode helper functions */
153 static void calc_bucket_map(int *bucket, int num_buckets,
154 int nsgs, int *bucket_map);
155 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
156 static inline u32 next_command(struct ctlr_info *h);
158 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
159 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
160 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
161 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
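/* Note: these attributes typically surface in sysfs as raid_level, lunid and
 * unique_id files under each scsi_device, and a write-only rescan file under
 * the scsi_host (e.g. echoing anything into /sys/class/scsi_host/hostN/rescan
 * invokes host_store_rescan() below and kicks off a topology rescan).
 */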
163 static struct device_attribute *hpsa_sdev_attrs[] = {
164 &dev_attr_raid_level,
170 static struct device_attribute *hpsa_shost_attrs[] = {
175 static struct scsi_host_template hpsa_driver_template = {
176 .module = THIS_MODULE,
179 .queuecommand = hpsa_scsi_queue_command,
181 .sg_tablesize = MAXSGENTRIES,
182 .use_clustering = ENABLE_CLUSTERING,
183 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
185 .slave_alloc = hpsa_slave_alloc,
186 .slave_destroy = hpsa_slave_destroy,
188 .compat_ioctl = hpsa_compat_ioctl,
190 .sdev_attrs = hpsa_sdev_attrs,
191 .shost_attrs = hpsa_shost_attrs,
194 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
196 unsigned long *priv = shost_priv(sdev->host);
197 return (struct ctlr_info *) *priv;
200 static struct task_struct *hpsa_scan_thread;
201 static DEFINE_MUTEX(hpsa_scan_mutex);
202 static LIST_HEAD(hpsa_scan_q);
203 static int hpsa_scan_func(void *data);
206 * add_to_scan_list() - add controller to rescan queue
207 * @h: Pointer to the controller.
209 * Adds the controller to the rescan queue if not already on the queue.
211 * returns 1 if added to the queue, 0 if skipped (could be on the
212 * queue already, or the controller could be initializing or shutting
215 static int add_to_scan_list(struct ctlr_info *h)
217 struct ctlr_info *test_h;
221 if (h->busy_initializing)
225 * If we don't get the lock, it means the driver is unloading
226 * and there's no point in scheduling a new scan.
228 if (!mutex_trylock(&h->busy_shutting_down))
231 mutex_lock(&hpsa_scan_mutex);
232 list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
238 if (!found && !h->busy_scanning) {
239 INIT_COMPLETION(h->scan_wait);
240 list_add_tail(&h->scan_list, &hpsa_scan_q);
243 mutex_unlock(&hpsa_scan_mutex);
244 mutex_unlock(&h->busy_shutting_down);
250 * remove_from_scan_list() - remove controller from rescan queue
251 * @h: Pointer to the controller.
253 * Removes the controller from the rescan queue if present. Blocks if
254 * the controller is currently conducting a rescan. The controller
255 * can be in one of three states:
256 * 1. Doesn't need a scan
257 * 2. On the scan list, but not scanning yet (we remove it)
258 * 3. Busy scanning (and not on the list). In this case we want to wait for
259 * the scan to complete to make sure the scanning thread for this
260 * controller is completely idle.
262 static void remove_from_scan_list(struct ctlr_info *h)
264 struct ctlr_info *test_h, *tmp_h;
266 mutex_lock(&hpsa_scan_mutex);
267 list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
268 if (test_h == h) { /* state 2. */
269 list_del(&h->scan_list);
270 complete_all(&h->scan_wait);
271 mutex_unlock(&hpsa_scan_mutex);
275 if (h->busy_scanning) { /* state 3. */
276 mutex_unlock(&hpsa_scan_mutex);
277 wait_for_completion(&h->scan_wait);
278 } else { /* state 1, nothing to do. */
279 mutex_unlock(&hpsa_scan_mutex);
283 /* hpsa_scan_func() - kernel thread used to rescan controllers
286 * A kernel thread used to scan for drive topology changes on
287 * controllers. The thread processes only one controller at a time
288 * using a queue. Controllers are added to the queue using
289 * add_to_scan_list() and removed from the queue either when they are
290 * done being processed or via remove_from_scan_list().
294 static int hpsa_scan_func(__attribute__((unused)) void *data)
300 set_current_state(TASK_INTERRUPTIBLE);
302 if (kthread_should_stop())
306 mutex_lock(&hpsa_scan_mutex);
307 if (list_empty(&hpsa_scan_q)) {
308 mutex_unlock(&hpsa_scan_mutex);
311 h = list_entry(hpsa_scan_q.next, struct ctlr_info,
313 list_del(&h->scan_list);
314 h->busy_scanning = 1;
315 mutex_unlock(&hpsa_scan_mutex);
316 host_no = h->scsi_host ? h->scsi_host->host_no : -1;
317 hpsa_update_scsi_devices(h, host_no);
318 complete_all(&h->scan_wait);
319 mutex_lock(&hpsa_scan_mutex);
320 h->busy_scanning = 0;
321 mutex_unlock(&hpsa_scan_mutex);
327 static int check_for_unit_attention(struct ctlr_info *h,
328 struct CommandList *c)
330 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
333 switch (c->err_info->SenseInfo[12]) {
335 dev_warn(&h->pdev->dev, "hpsa%d: a state change "
336 "detected, command retried\n", h->ctlr);
339 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
340 "detected, action required\n", h->ctlr);
342 case REPORT_LUNS_CHANGED:
343 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
344 "changed\n", h->ctlr);
346 * Here, we could call add_to_scan_list and wake up the scan thread,
347 * except that it's quite likely that we will get more than one
348 * REPORT_LUNS_CHANGED condition in quick succession, which means
349 * that those which occur after the first one will likely happen
350 * *during* the hpsa_scan_thread's rescan. And the rescan code is not
351 * robust enough to restart in the middle, undoing what it has already
352 * done, and it's not clear that it's even possible to do this, since
353 * part of what it does is notify the SCSI mid layer, which starts
354 * doing its own I/O to read partition tables and so on, and the
355 * driver doesn't have visibility to know what might need undoing.
356 * In any event, even if it were possible, it would be horribly complicated
357 * to get right, so we just don't do it for now.
359 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
363 dev_warn(&h->pdev->dev, "hpsa%d: a power on "
364 "or device reset detected\n", h->ctlr);
366 case UNIT_ATTENTION_CLEARED:
367 dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
368 "cleared by another initiator\n", h->ctlr);
371 dev_warn(&h->pdev->dev, "hpsa%d: unknown "
372 "unit attention detected\n", h->ctlr);
378 static ssize_t host_store_rescan(struct device *dev,
379 struct device_attribute *attr,
380 const char *buf, size_t count)
383 struct Scsi_Host *shost = class_to_shost(dev);
384 unsigned long *priv = shost_priv(shost);
385 h = (struct ctlr_info *) *priv;
386 if (add_to_scan_list(h)) {
387 wake_up_process(hpsa_scan_thread);
388 wait_for_completion_interruptible(&h->scan_wait);
393 /* Enqueuing and dequeuing functions for cmdlists. */
394 static inline void addQ(struct hlist_head *list, struct CommandList *c)
396 hlist_add_head(&c->list, list);
399 static inline u32 next_command(struct ctlr_info *h)
403 if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
404 return h->access.command_completed(h);
406 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
407 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
408 (h->reply_pool_head)++;
409 h->commands_outstanding--;
413 /* Check for wraparound */
414 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
415 h->reply_pool_head = h->reply_pool;
416 h->reply_pool_wraparound ^= 1;
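	/* The low bit of each reply entry must match reply_pool_wraparound
	 * (checked above) for the entry to be consumed; flipping the flag
	 * here, each time the head wraps, is what invalidates entries left
	 * over from the previous pass around the ring.
	 */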
421 /* set_performant_mode: Modify the tag for performant mode:
422 * set bit 0 for the pull model, bits 3-1 for the block fetch table entry.
423 */
425 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
427 if (likely(h->transMethod == CFGTBL_Trans_Performant))
428 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
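	/* For example (values assumed for illustration): with
	 * Header.SGList == 3 and blockFetchTable[3] == 2, this ORs in
	 * 1 | (2 << 1) == 0x5 -- bit 0 selects the pull model and bits 3-1
	 * carry the block fetch count, as described in the comment above.
	 */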
431 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
432 struct CommandList *c)
436 set_performant_mode(h, c);
437 spin_lock_irqsave(&h->lock, flags);
441 spin_unlock_irqrestore(&h->lock, flags);
444 static inline void removeQ(struct CommandList *c)
446 if (WARN_ON(hlist_unhashed(&c->list)))
448 hlist_del_init(&c->list);
451 static inline int is_hba_lunid(unsigned char scsi3addr[])
453 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
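/* is_logical_dev_addr_mode: a CISS LUN address whose byte 3 has its two high
 * bits equal to 01b (i.e. (scsi3addr[3] & 0xC0) == 0x40) denotes a logical
 * volume; figure_bus_target_lun() below relies on this when deciding
 * bus/target/lun placement.
 */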
456 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
458 return (scsi3addr[3] & 0xC0) == 0x40;
461 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
464 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
466 static ssize_t raid_level_show(struct device *dev,
467 struct device_attribute *attr, char *buf)
470 unsigned char rlevel;
472 struct scsi_device *sdev;
473 struct hpsa_scsi_dev_t *hdev;
476 sdev = to_scsi_device(dev);
477 h = sdev_to_hba(sdev);
478 spin_lock_irqsave(&h->lock, flags);
479 hdev = sdev->hostdata;
481 spin_unlock_irqrestore(&h->lock, flags);
485 /* Is this even a logical drive? */
486 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
487 spin_unlock_irqrestore(&h->lock, flags);
488 l = snprintf(buf, PAGE_SIZE, "N/A\n");
492 rlevel = hdev->raid_level;
493 spin_unlock_irqrestore(&h->lock, flags);
494 if (rlevel > RAID_UNKNOWN)
495 rlevel = RAID_UNKNOWN;
496 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
500 static ssize_t lunid_show(struct device *dev,
501 struct device_attribute *attr, char *buf)
504 struct scsi_device *sdev;
505 struct hpsa_scsi_dev_t *hdev;
507 unsigned char lunid[8];
509 sdev = to_scsi_device(dev);
510 h = sdev_to_hba(sdev);
511 spin_lock_irqsave(&h->lock, flags);
512 hdev = sdev->hostdata;
514 spin_unlock_irqrestore(&h->lock, flags);
517 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
518 spin_unlock_irqrestore(&h->lock, flags);
519 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
520 lunid[0], lunid[1], lunid[2], lunid[3],
521 lunid[4], lunid[5], lunid[6], lunid[7]);
524 static ssize_t unique_id_show(struct device *dev,
525 struct device_attribute *attr, char *buf)
528 struct scsi_device *sdev;
529 struct hpsa_scsi_dev_t *hdev;
531 unsigned char sn[16];
533 sdev = to_scsi_device(dev);
534 h = sdev_to_hba(sdev);
535 spin_lock_irqsave(&h->lock, flags);
536 hdev = sdev->hostdata;
538 spin_unlock_irqrestore(&h->lock, flags);
541 memcpy(sn, hdev->device_id, sizeof(sn));
542 spin_unlock_irqrestore(&h->lock, flags);
543 return snprintf(buf, 16 * 2 + 2,
544 "%02X%02X%02X%02X%02X%02X%02X%02X"
545 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
546 sn[0], sn[1], sn[2], sn[3],
547 sn[4], sn[5], sn[6], sn[7],
548 sn[8], sn[9], sn[10], sn[11],
549 sn[12], sn[13], sn[14], sn[15]);
552 static int hpsa_find_target_lun(struct ctlr_info *h,
553 unsigned char scsi3addr[], int bus, int *target, int *lun)
555 /* finds an unused bus, target, lun for a new physical device
556 * assumes h->devlock is held
559 DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
561 memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
563 for (i = 0; i < h->ndevices; i++) {
564 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
565 set_bit(h->dev[i]->target, lun_taken);
568 for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
569 if (!test_bit(i, lun_taken)) {
580 /* Add an entry into h->dev[] array. */
581 static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
582 struct hpsa_scsi_dev_t *device,
583 struct hpsa_scsi_dev_t *added[], int *nadded)
585 /* assumes h->devlock is held */
588 unsigned char addr1[8], addr2[8];
589 struct hpsa_scsi_dev_t *sd;
591 if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
592 dev_err(&h->pdev->dev, "too many devices, some will be "
597 /* physical devices do not have lun or target assigned until now. */
598 if (device->lun != -1)
599 /* Logical device, lun is already assigned. */
602 /* If this device is a non-zero lun of a multi-lun device,
603 * byte 4 of the 8-byte LUN addr will contain the logical
604 * unit no, zero otherwise.
606 if (device->scsi3addr[4] == 0) {
607 /* This is not a non-zero lun of a multi-lun device */
608 if (hpsa_find_target_lun(h, device->scsi3addr,
609 device->bus, &device->target, &device->lun) != 0)
614 /* This is a non-zero lun of a multi-lun device.
615 * Search through our list and find the device which
616 * has the same 8 byte LUN address, excepting byte 4.
617 * Assign the same bus and target for this new LUN.
618 * Use the logical unit number from the firmware.
620 memcpy(addr1, device->scsi3addr, 8);
621 addr1[4] = 0;
622 for (i = 0; i < n; i++) {
623 sd = h->dev[i];
624 memcpy(addr2, sd->scsi3addr, 8);
625 addr2[4] = 0;
626 /* differ only in byte 4? */
627 if (memcmp(addr1, addr2, 8) == 0) {
628 device->bus = sd->bus;
629 device->target = sd->target;
630 device->lun = device->scsi3addr[4];
634 if (device->lun == -1) {
635 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
636 " suspect firmware bug or unsupported hardware "
645 added[*nadded] = device;
648 /* initially, (before registering with scsi layer) we don't
649 * know our hostno and we don't want to print anything first
650 * time anyway (the scsi layer's inquiries will show that info)
652 /* if (hostno != -1) */
653 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
654 scsi_device_type(device->devtype), hostno,
655 device->bus, device->target, device->lun);
659 /* Remove an entry from h->dev[] array. */
660 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
661 struct hpsa_scsi_dev_t *removed[], int *nremoved)
663 /* assumes h->devlock is held */
665 struct hpsa_scsi_dev_t *sd;
667 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
670 removed[*nremoved] = h->dev[entry];
673 for (i = entry; i < h->ndevices-1; i++)
674 h->dev[i] = h->dev[i+1];
676 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
677 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
681 #define SCSI3ADDR_EQ(a, b) ( \
682 (a)[7] == (b)[7] && \
683 (a)[6] == (b)[6] && \
684 (a)[5] == (b)[5] && \
685 (a)[4] == (b)[4] && \
686 (a)[3] == (b)[3] && \
687 (a)[2] == (b)[2] && \
688 (a)[1] == (b)[1] && \
689 (a)[0] == (b)[0])
691 static void fixup_botched_add(struct ctlr_info *h,
692 struct hpsa_scsi_dev_t *added)
694 /* called when scsi_add_device fails in order to re-adjust
695 * h->dev[] to match the mid layer's view.
700 spin_lock_irqsave(&h->lock, flags);
701 for (i = 0; i < h->ndevices; i++) {
702 if (h->dev[i] == added) {
703 for (j = i; j < h->ndevices-1; j++)
704 h->dev[j] = h->dev[j+1];
709 spin_unlock_irqrestore(&h->lock, flags);
713 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
714 struct hpsa_scsi_dev_t *dev2)
716 if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
717 (dev1->lun != -1 && dev2->lun != -1)) &&
718 dev1->devtype != 0x0C)
719 return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
721 /* we compare everything except lun and target as these
722 * are not yet assigned. Compare parts likely to differ first.
725 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
726 sizeof(dev1->scsi3addr)) != 0)
728 if (memcmp(dev1->device_id, dev2->device_id,
729 sizeof(dev1->device_id)) != 0)
731 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
733 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
735 if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
737 if (dev1->devtype != dev2->devtype)
739 if (dev1->raid_level != dev2->raid_level)
741 if (dev1->bus != dev2->bus)
746 /* Find needle in haystack. If exact match found, return DEVICE_SAME,
747 * and return needle location in *index. If scsi3addr matches, but not
748 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
749 * location in *index. If needle not found, return DEVICE_NOT_FOUND.
751 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
752 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
756 #define DEVICE_NOT_FOUND 0
757 #define DEVICE_CHANGED 1
758 #define DEVICE_SAME 2
759 for (i = 0; i < haystack_size; i++) {
760 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
762 if (device_is_the_same(needle, haystack[i]))
765 return DEVICE_CHANGED;
769 return DEVICE_NOT_FOUND;
772 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
773 struct hpsa_scsi_dev_t *sd[], int nsds)
775 /* sd contains scsi3 addresses and devtypes, and inquiry
776 * data. This function takes what's in sd to be the current
777 * reality and updates h->dev[] to reflect that reality.
779 int i, entry, device_change, changes = 0;
780 struct hpsa_scsi_dev_t *csd;
782 struct hpsa_scsi_dev_t **added, **removed;
783 int nadded, nremoved;
784 struct Scsi_Host *sh = NULL;
786 added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
788 removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
791 if (!added || !removed) {
792 dev_warn(&h->pdev->dev, "out of memory in "
793 "adjust_hpsa_scsi_table\n");
797 spin_lock_irqsave(&h->devlock, flags);
799 /* find any devices in h->dev[] that are not in
800 * sd[] and remove them from h->dev[], and for any
801 * devices which have changed, remove the old device
802 * info and add the new device info.
807 while (i < h->ndevices) {
809 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
810 if (device_change == DEVICE_NOT_FOUND) {
812 hpsa_scsi_remove_entry(h, hostno, i,
814 continue; /* remove ^^^, hence i not incremented */
815 } else if (device_change == DEVICE_CHANGED) {
817 hpsa_scsi_remove_entry(h, hostno, i,
819 (void) hpsa_scsi_add_entry(h, hostno, sd[entry],
821 /* add can't fail, we just removed one. */
822 sd[entry] = NULL; /* prevent it from being freed */
827 /* Now, make sure every device listed in sd[] is also
828 * listed in h->dev[], adding them if they aren't found
831 for (i = 0; i < nsds; i++) {
832 if (!sd[i]) /* if already added above. */
834 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
835 h->ndevices, &entry);
836 if (device_change == DEVICE_NOT_FOUND) {
838 if (hpsa_scsi_add_entry(h, hostno, sd[i],
839 added, &nadded) != 0)
841 sd[i] = NULL; /* prevent from being freed later. */
842 } else if (device_change == DEVICE_CHANGED) {
843 /* should never happen... */
845 dev_warn(&h->pdev->dev,
846 "device unexpectedly changed.\n");
847 /* but if it does happen, we just ignore that device */
850 spin_unlock_irqrestore(&h->devlock, flags);
852 /* Don't notify the scsi mid layer of any changes the first time
853 * through (or if there are no changes); scsi_scan_host will do it
854 * later, the first time through.
856 if (hostno == -1 || !changes)
860 /* Notify scsi mid layer of any removed devices */
861 for (i = 0; i < nremoved; i++) {
862 struct scsi_device *sdev =
863 scsi_device_lookup(sh, removed[i]->bus,
864 removed[i]->target, removed[i]->lun);
866 scsi_remove_device(sdev);
867 scsi_device_put(sdev);
869 /* We don't expect to get here.
870 * Future cmds to this device will get a selection
871 * timeout as if the device were gone.
873 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
874 " for removal.", hostno, removed[i]->bus,
875 removed[i]->target, removed[i]->lun);
881 /* Notify scsi mid layer of any added devices */
882 for (i = 0; i < nadded; i++) {
883 if (scsi_add_device(sh, added[i]->bus,
884 added[i]->target, added[i]->lun) == 0)
886 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
887 "device not added.\n", hostno, added[i]->bus,
888 added[i]->target, added[i]->lun);
889 /* now we have to remove it from h->dev,
890 * since it didn't get added to scsi mid layer
892 fixup_botched_add(h, added[i]);
901 * Lookup bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *
902 * Assumes h->devlock is held.
904 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
905 int bus, int target, int lun)
908 struct hpsa_scsi_dev_t *sd;
910 for (i = 0; i < h->ndevices; i++) {
912 if (sd->bus == bus && sd->target == target && sd->lun == lun)
918 /* link sdev->hostdata to our per-device structure. */
919 static int hpsa_slave_alloc(struct scsi_device *sdev)
921 struct hpsa_scsi_dev_t *sd;
925 h = sdev_to_hba(sdev);
926 spin_lock_irqsave(&h->devlock, flags);
927 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
928 sdev_id(sdev), sdev->lun);
931 spin_unlock_irqrestore(&h->devlock, flags);
935 static void hpsa_slave_destroy(struct scsi_device *sdev)
940 static void hpsa_scsi_setup(struct ctlr_info *h)
944 spin_lock_init(&h->devlock);
947 static void complete_scsi_command(struct CommandList *cp,
948 int timeout, u32 tag)
950 struct scsi_cmnd *cmd;
952 struct ErrorInfo *ei;
954 unsigned char sense_key;
955 unsigned char asc; /* additional sense code */
956 unsigned char ascq; /* additional sense code qualifier */
959 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
962 scsi_dma_unmap(cmd); /* undo the DMA mappings */
964 cmd->result = (DID_OK << 16); /* host byte */
965 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
966 cmd->result |= (ei->ScsiStatus << 1);
968 /* copy the sense data whether we need to or not. */
969 memcpy(cmd->sense_buffer, ei->SenseInfo,
970 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
971 SCSI_SENSE_BUFFERSIZE :
973 scsi_set_resid(cmd, ei->ResidualCnt);
975 if (ei->CommandStatus == 0) {
981 /* an error has occurred */
982 switch (ei->CommandStatus) {
984 case CMD_TARGET_STATUS:
985 if (ei->ScsiStatus) {
987 sense_key = 0xf & ei->SenseInfo[2];
988 /* Get additional sense code */
989 asc = ei->SenseInfo[12];
990 /* Get additional sense code qualifier */
991 ascq = ei->SenseInfo[13];
994 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
995 if (check_for_unit_attention(h, cp)) {
996 cmd->result = DID_SOFT_ERROR << 16;
999 if (sense_key == ILLEGAL_REQUEST) {
1001 * SCSI REPORT_LUNS is commonly unsupported on
1002 * Smart Array. Suppress noisy complaint.
1004 if (cp->Request.CDB[0] == REPORT_LUNS)
1007 /* If ASC/ASCQ indicate Logical Unit
1008 * Not Supported condition,
1010 if ((asc == 0x25) && (ascq == 0x0)) {
1011 dev_warn(&h->pdev->dev, "cp %p "
1012 "has check condition\n", cp);
1017 if (sense_key == NOT_READY) {
1018 /* If Sense is Not Ready, Logical Unit
1019 * Not ready, Manual Intervention
1022 if ((asc == 0x04) && (ascq == 0x03)) {
1023 cmd->result = DID_NO_CONNECT << 16;
1024 dev_warn(&h->pdev->dev, "cp %p "
1025 "has check condition: unit "
1026 "not ready, manual "
1027 "intervention required\n", cp);
1033 /* Must be some other type of check condition */
1034 dev_warn(&h->pdev->dev, "cp %p has check condition: "
1036 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1037 "Returning result: 0x%x, "
1038 "cmd=[%02x %02x %02x %02x %02x "
1039 "%02x %02x %02x %02x %02x]\n",
1040 cp, sense_key, asc, ascq,
1042 cmd->cmnd[0], cmd->cmnd[1],
1043 cmd->cmnd[2], cmd->cmnd[3],
1044 cmd->cmnd[4], cmd->cmnd[5],
1045 cmd->cmnd[6], cmd->cmnd[7],
1046 cmd->cmnd[8], cmd->cmnd[9]);
1051 /* Problem was not a check condition
1052 * Pass it up to the upper layers...
1054 if (ei->ScsiStatus) {
1055 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1056 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1057 "Returning result: 0x%x\n",
1059 sense_key, asc, ascq,
1061 } else { /* scsi status is zero??? How??? */
1062 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1063 "Returning no connection.\n", cp),
1065 /* Ordinarily, this case should never happen,
1066 * but there is a bug in some released firmware
1067 * revisions that allows it to happen if, for
1068 * example, a 4100 backplane loses power and
1069 * the tape drive is in it. We assume that
1070 * it's a fatal error of some kind because we
1071 * can't show that it wasn't. We will make it
1072 * look like selection timeout since that is
1073 * the most common reason for this to occur,
1074 * and it's severe enough.
1077 cmd->result = DID_NO_CONNECT << 16;
1081 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1083 case CMD_DATA_OVERRUN:
1084 dev_warn(&h->pdev->dev, "cp %p has"
1085 " completed with data overrun "
1089 /* print_bytes(cp, sizeof(*cp), 1, 0);
1091 /* We get CMD_INVALID if you address a non-existent device
1092 * instead of a selection timeout (no response). You will
1093 * see this if you yank out a drive, then try to access it.
1094 * This is kind of a shame because it means that any other
1095 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1096 * missing target. */
1097 cmd->result = DID_NO_CONNECT << 16;
1100 case CMD_PROTOCOL_ERR:
1101 dev_warn(&h->pdev->dev, "cp %p has "
1102 "protocol error \n", cp);
1104 case CMD_HARDWARE_ERR:
1105 cmd->result = DID_ERROR << 16;
1106 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1108 case CMD_CONNECTION_LOST:
1109 cmd->result = DID_ERROR << 16;
1110 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1113 cmd->result = DID_ABORT << 16;
1114 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1115 cp, ei->ScsiStatus);
1117 case CMD_ABORT_FAILED:
1118 cmd->result = DID_ERROR << 16;
1119 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1121 case CMD_UNSOLICITED_ABORT:
1122 cmd->result = DID_ABORT << 16;
1123 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1127 cmd->result = DID_TIME_OUT << 16;
1128 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1131 cmd->result = DID_ERROR << 16;
1132 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1133 cp, ei->CommandStatus);
1135 cmd->scsi_done(cmd);
1139 static int hpsa_scsi_detect(struct ctlr_info *h)
1141 struct Scsi_Host *sh;
1144 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1151 sh->max_channel = 3;
1152 sh->max_cmd_len = MAX_COMMAND_SIZE;
1153 sh->max_lun = HPSA_MAX_LUN;
1154 sh->max_id = HPSA_MAX_LUN;
1155 sh->can_queue = h->nr_cmds;
1156 sh->cmd_per_lun = h->nr_cmds;
1158 sh->hostdata[0] = (unsigned long) h;
1159 sh->irq = h->intr[PERF_MODE_INT];
1160 sh->unique_id = sh->irq;
1161 error = scsi_add_host(sh, &h->pdev->dev);
1168 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1169 " failed for controller %d\n", h->ctlr);
1173 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1174 " failed for controller %d\n", h->ctlr);
1178 static void hpsa_pci_unmap(struct pci_dev *pdev,
1179 struct CommandList *c, int sg_used, int data_direction)
1182 union u64bit addr64;
1184 for (i = 0; i < sg_used; i++) {
1185 addr64.val32.lower = c->SG[i].Addr.lower;
1186 addr64.val32.upper = c->SG[i].Addr.upper;
1187 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1192 static void hpsa_map_one(struct pci_dev *pdev,
1193 struct CommandList *cp,
1200 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1201 cp->Header.SGList = 0;
1202 cp->Header.SGTotal = 0;
1206 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1207 cp->SG[0].Addr.lower =
1208 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1209 cp->SG[0].Addr.upper =
1210 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1211 cp->SG[0].Len = buflen;
1212 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1213 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1216 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1217 struct CommandList *c)
1219 DECLARE_COMPLETION_ONSTACK(wait);
1222 enqueue_cmd_and_start_io(h, c);
1223 wait_for_completion(&wait);
1226 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1227 struct CommandList *c, int data_direction)
1229 int retry_count = 0;
1232 memset(c->err_info, 0, sizeof(c->err_info));
1233 hpsa_scsi_do_simple_cmd_core(h, c);
1235 } while (check_for_unit_attention(h, c) && retry_count <= 3);
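	/* Retry (bounded by the retry count) as long as the target keeps
	 * answering with a UNIT ATTENTION condition, e.g. right after a
	 * reset or a LUN inventory change.
	 */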
1236 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1239 static void hpsa_scsi_interpret_error(struct CommandList *cp)
1241 struct ErrorInfo *ei;
1242 struct device *d = &cp->h->pdev->dev;
1245 switch (ei->CommandStatus) {
1246 case CMD_TARGET_STATUS:
1247 dev_warn(d, "cmd %p has completed with errors\n", cp);
1248 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1250 if (ei->ScsiStatus == 0)
1251 dev_warn(d, "SCSI status is abnormally zero. "
1252 "(probably indicates selection timeout "
1253 "reported incorrectly due to a known "
1254 "firmware bug, circa July, 2001.)\n");
1256 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1257 dev_info(d, "UNDERRUN\n");
1259 case CMD_DATA_OVERRUN:
1260 dev_warn(d, "cp %p has completed with data overrun\n", cp);
1263 /* controller unfortunately reports SCSI passthrus
1264 * to non-existent targets as invalid commands.
1266 dev_warn(d, "cp %p is reported invalid (probably means "
1267 "target device no longer present)\n", cp);
1268 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
1272 case CMD_PROTOCOL_ERR:
1273 dev_warn(d, "cp %p has protocol error \n", cp);
1275 case CMD_HARDWARE_ERR:
1276 /* cmd->result = DID_ERROR << 16; */
1277 dev_warn(d, "cp %p had hardware error\n", cp);
1279 case CMD_CONNECTION_LOST:
1280 dev_warn(d, "cp %p had connection lost\n", cp);
1283 dev_warn(d, "cp %p was aborted\n", cp);
1285 case CMD_ABORT_FAILED:
1286 dev_warn(d, "cp %p reports abort failed\n", cp);
1288 case CMD_UNSOLICITED_ABORT:
1289 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1292 dev_warn(d, "cp %p timed out\n", cp);
1295 dev_warn(d, "cp %p returned unknown status %x\n", cp,
1300 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1301 unsigned char page, unsigned char *buf,
1302 unsigned char bufsize)
1305 struct CommandList *c;
1306 struct ErrorInfo *ei;
1308 c = cmd_special_alloc(h);
1310 if (c == NULL) { /* trouble... */
1311 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1315 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1316 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1318 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1319 hpsa_scsi_interpret_error(c);
1322 cmd_special_free(h, c);
1326 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1329 struct CommandList *c;
1330 struct ErrorInfo *ei;
1332 c = cmd_special_alloc(h);
1334 if (c == NULL) { /* trouble... */
1335 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1339 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1340 hpsa_scsi_do_simple_cmd_core(h, c);
1341 /* no unmap needed here because no data xfer. */
1344 if (ei->CommandStatus != 0) {
1345 hpsa_scsi_interpret_error(c);
1348 cmd_special_free(h, c);
1352 static void hpsa_get_raid_level(struct ctlr_info *h,
1353 unsigned char *scsi3addr, unsigned char *raid_level)
1358 *raid_level = RAID_UNKNOWN;
1359 buf = kzalloc(64, GFP_KERNEL);
1362 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1364 *raid_level = buf[8];
1365 if (*raid_level > RAID_UNKNOWN)
1366 *raid_level = RAID_UNKNOWN;
1371 /* Get the device id from inquiry page 0x83 */
1372 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1373 unsigned char *device_id, int buflen)
1380 buf = kzalloc(64, GFP_KERNEL);
1383 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1385 memcpy(device_id, &buf[8], buflen);
1390 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1391 struct ReportLUNdata *buf, int bufsize,
1392 int extended_response)
1395 struct CommandList *c;
1396 unsigned char scsi3addr[8];
1397 struct ErrorInfo *ei;
1399 c = cmd_special_alloc(h);
1400 if (c == NULL) { /* trouble... */
1401 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1404 /* address the controller */
1405 memset(scsi3addr, 0, sizeof(scsi3addr));
1406 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1407 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1408 if (extended_response)
1409 c->Request.CDB[1] = extended_response;
1410 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1412 if (ei->CommandStatus != 0 &&
1413 ei->CommandStatus != CMD_DATA_UNDERRUN) {
1414 hpsa_scsi_interpret_error(c);
1417 cmd_special_free(h, c);
1421 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1422 struct ReportLUNdata *buf,
1423 int bufsize, int extended_response)
1425 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1428 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1429 struct ReportLUNdata *buf, int bufsize)
1431 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1434 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1435 int bus, int target, int lun)
1438 device->target = target;
1442 static int hpsa_update_device_info(struct ctlr_info *h,
1443 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1445 #define OBDR_TAPE_INQ_SIZE 49
1446 unsigned char *inq_buff;
1448 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1452 /* Do an inquiry to the device to see what it is. */
1453 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1454 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1455 /* Inquiry failed (msg printed already) */
1456 dev_err(&h->pdev->dev,
1457 "hpsa_update_device_info: inquiry failed\n");
1461 /* As a side effect, record the firmware version number
1462 * if we happen to be talking to the RAID controller.
1464 if (is_hba_lunid(scsi3addr))
1465 memcpy(h->firm_ver, &inq_buff[32], 4);
1467 this_device->devtype = (inq_buff[0] & 0x1f);
1468 memcpy(this_device->scsi3addr, scsi3addr, 8);
1469 memcpy(this_device->vendor, &inq_buff[8],
1470 sizeof(this_device->vendor));
1471 memcpy(this_device->model, &inq_buff[16],
1472 sizeof(this_device->model));
1473 memcpy(this_device->revision, &inq_buff[32],
1474 sizeof(this_device->revision));
1475 memset(this_device->device_id, 0,
1476 sizeof(this_device->device_id));
1477 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1478 sizeof(this_device->device_id));
1480 if (this_device->devtype == TYPE_DISK &&
1481 is_logical_dev_addr_mode(scsi3addr))
1482 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1484 this_device->raid_level = RAID_UNKNOWN;
1494 static unsigned char *msa2xxx_model[] = {
1502 static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1506 for (i = 0; msa2xxx_model[i]; i++)
1507 if (strncmp(device->model, msa2xxx_model[i],
1508 strlen(msa2xxx_model[i])) == 0)
1513 /* Helper function to assign bus, target, lun mapping of devices.
1514 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
1515 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
1516 * Logical drive target and lun are assigned at this time, but
1517 * physical device lun and target assignment are deferred (assigned
1518 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1520 static void figure_bus_target_lun(struct ctlr_info *h,
1521 u8 *lunaddrbytes, int *bus, int *target, int *lun,
1522 struct hpsa_scsi_dev_t *device)
1526 if (is_logical_dev_addr_mode(lunaddrbytes)) {
1527 /* logical device */
1528 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1529 if (is_msa2xxx(h, device)) {
1531 *target = (lunid >> 16) & 0x3fff;
1532 *lun = lunid & 0x00ff;
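		/* Worked example (numbers assumed for illustration): an
		 * MSA2xxx logical volume whose 32-bit lunid decodes to
		 * 0x00020001 is placed on bus 1 with target
		 * (0x00020001 >> 16) & 0x3fff == 2 and lun
		 * 0x00020001 & 0x00ff == 1.
		 */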
1536 *target = lunid & 0x3fff;
1539 /* physical device */
1540 if (is_hba_lunid(lunaddrbytes))
1545 *lun = -1; /* we will fill these in later. */
1550 * If there is no lun 0 on a target, linux won't find any devices.
1551 * For the MSA2xxx boxes, we have to manually detect the enclosure
1552 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1553 * it for some reason. *tmpdevice is the target we're adding,
1554 * this_device is a pointer into the current element of currentsd[]
1555 * that we're building up in update_scsi_devices(), below.
1556 * lunzerobits is a bitmap that tracks which targets already have a
1557 * lun 0 populated.
1558 * Returns 1 if an enclosure was added, 0 if not.
1560 static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1561 struct hpsa_scsi_dev_t *tmpdevice,
1562 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1563 int bus, int target, int lun, unsigned long lunzerobits[],
1564 int *nmsa2xxx_enclosures)
1566 unsigned char scsi3addr[8];
1568 if (test_bit(target, lunzerobits))
1569 return 0; /* There is already a lun 0 on this target. */
1571 if (!is_logical_dev_addr_mode(lunaddrbytes))
1572 return 0; /* It's the logical targets that may lack lun 0. */
1574 if (!is_msa2xxx(h, tmpdevice))
1575 return 0; /* It's only the MSA2xxx that have this problem. */
1577 if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
1580 if (is_hba_lunid(scsi3addr))
1581 return 0; /* Don't add the RAID controller here. */
1583 #define MAX_MSA2XXX_ENCLOSURES 32
1584 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1585 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
1586 "enclosures exceeded. Check your hardware "
1591 memset(scsi3addr, 0, 8);
1592 scsi3addr[3] = target;
1593 if (hpsa_update_device_info(h, scsi3addr, this_device))
1595 (*nmsa2xxx_enclosures)++;
1596 hpsa_set_bus_target_lun(this_device, bus, target, 0);
1597 set_bit(target, lunzerobits);
1602 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
1603 * logdev. The number of luns in physdev and logdev are returned in
1604 * *nphysicals and *nlogicals, respectively.
1605 * Returns 0 on success, -1 otherwise.
1607 static int hpsa_gather_lun_info(struct ctlr_info *h,
1609 struct ReportLUNdata *physdev, u32 *nphysicals,
1610 struct ReportLUNdata *logdev, u32 *nlogicals)
1612 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1613 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1616 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1617 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1618 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1619 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1620 *nphysicals - HPSA_MAX_PHYS_LUN);
1621 *nphysicals = HPSA_MAX_PHYS_LUN;
1623 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1624 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1627 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1628 /* Reject Logicals in excess of our max capability. */
1629 if (*nlogicals > HPSA_MAX_LUN) {
1630 dev_warn(&h->pdev->dev,
1631 "maximum logical LUNs (%d) exceeded. "
1632 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1633 *nlogicals - HPSA_MAX_LUN);
1634 *nlogicals = HPSA_MAX_LUN;
1636 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1637 dev_warn(&h->pdev->dev,
1638 "maximum logical + physical LUNs (%d) exceeded. "
1639 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1640 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1641 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1646 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1648 /* the idea here is we could get notified
1649 * that some devices have changed, so we do a report
1650 * physical luns and report logical luns cmd, and adjust
1651 * our list of devices accordingly.
1653 * The scsi3addr's of devices won't change so long as the
1654 * adapter is not reset. That means we can rescan and
1655 * tell which devices we already know about, vs. new
1656 * devices, vs. disappearing devices.
1658 struct ReportLUNdata *physdev_list = NULL;
1659 struct ReportLUNdata *logdev_list = NULL;
1660 unsigned char *inq_buff = NULL;
1663 u32 ndev_allocated = 0;
1664 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1666 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1667 int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1668 int bus, target, lun;
1669 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1671 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
1673 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1674 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1675 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1676 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1678 if (!currentsd || !physdev_list || !logdev_list ||
1679 !inq_buff || !tmpdevice) {
1680 dev_err(&h->pdev->dev, "out of memory\n");
1683 memset(lunzerobits, 0, sizeof(lunzerobits));
1685 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1686 logdev_list, &nlogicals))
1689 /* We might see up to 32 MSA2xxx enclosures, actually 8 of them
1690 * but each of them 4 times through different paths. The plus 1
1691 * is for the RAID controller.
1693 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1695 /* Allocate the per device structures */
1696 for (i = 0; i < ndevs_to_allocate; i++) {
1697 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1698 if (!currentsd[i]) {
1699 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1700 __FILE__, __LINE__);
1706 /* adjust our table of devices */
1707 nmsa2xxx_enclosures = 0;
1708 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1711 /* Figure out where the LUN ID info is coming from */
1713 lunaddrbytes = &physdev_list->LUN[i][0];
1715 if (i < nphysicals + nlogicals)
1716 lunaddrbytes =
1717 &logdev_list->LUN[i-nphysicals][0];
1718 else /* jam in the RAID controller at the end */
1719 lunaddrbytes = RAID_CTLR_LUNID;
1721 /* skip masked physical devices. */
1722 if (lunaddrbytes[3] & 0xC0 && i < nphysicals)
1725 /* Get device type, vendor, model, device id */
1726 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
1727 continue; /* skip it if we can't talk to it. */
1728 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1730 this_device = currentsd[ncurrent];
1733 * For the msa2xxx boxes, we have to insert a LUN 0 which
1734 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1735 * is nonetheless an enclosure device there. We have to
1736 * present that otherwise linux won't find anything if
1737 * there is no lun 0.
1739 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1740 lunaddrbytes, bus, target, lun, lunzerobits,
1741 &nmsa2xxx_enclosures)) {
1743 this_device = currentsd[ncurrent];
1746 *this_device = *tmpdevice;
1747 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1749 switch (this_device->devtype) {
1751 /* We don't *really* support actual CD-ROM devices,
1752 * just "One Button Disaster Recovery" tape drive
1753 * which temporarily pretends to be a CD-ROM drive.
1754 * So we check that the device is really an OBDR tape
1755 * device by checking for "$DR-10" in bytes 43-48 of
1756 * the inquiry data.
1759 #define OBDR_TAPE_SIG "$DR-10"
1760 strncpy(obdr_sig, &inq_buff[43], 6);
1762 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1763 /* Not OBDR device, ignore it. */
1774 case TYPE_MEDIUM_CHANGER:
1778 /* Only present the Smartarray HBA as a RAID controller.
1779 * If it's a RAID controller other than the HBA itself
1780 * (an external RAID controller, MSA500 or similar)
1781 * don't present it.
1783 if (!is_hba_lunid(lunaddrbytes))
1790 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
1793 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1796 for (i = 0; i < ndev_allocated; i++)
1797 kfree(currentsd[i]);
1800 kfree(physdev_list);
1804 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1805 * dma mapping and fills in the scatter gather entries of the
1806 * hpsa command, cp.
1808 static int hpsa_scatter_gather(struct pci_dev *pdev,
1809 struct CommandList *cp,
1810 struct scsi_cmnd *cmd)
1813 struct scatterlist *sg;
1817 BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
1819 use_sg = scsi_dma_map(cmd);
1824 goto sglist_finished;
1826 scsi_for_each_sg(cmd, sg, use_sg, i) {
1827 addr64 = (u64) sg_dma_address(sg);
1828 len = sg_dma_len(sg);
1829 cp->SG[i].Addr.lower =
1830 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1831 cp->SG[i].Addr.upper =
1832 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1833 cp->SG[i].Len = len;
1834 cp->SG[i].Ext = 0; /* we are not chaining */
1839 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
1840 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
1845 static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
1846 void (*done)(struct scsi_cmnd *))
1848 struct ctlr_info *h;
1849 struct hpsa_scsi_dev_t *dev;
1850 unsigned char scsi3addr[8];
1851 struct CommandList *c;
1852 unsigned long flags;
1854 /* Get the ptr to our adapter structure out of cmd->host. */
1855 h = sdev_to_hba(cmd->device);
1856 dev = cmd->device->hostdata;
1858 cmd->result = DID_NO_CONNECT << 16;
1862 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
1864 /* Need a lock as this is being allocated from the pool */
1865 spin_lock_irqsave(&h->lock, flags);
1867 spin_unlock_irqrestore(&h->lock, flags);
1868 if (c == NULL) { /* trouble... */
1869 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
1870 return SCSI_MLQUEUE_HOST_BUSY;
1873 /* Fill in the command list header */
1875 cmd->scsi_done = done; /* save this for use by completion code */
1877 /* save c in case we have to abort it */
1878 cmd->host_scribble = (unsigned char *) c;
1880 c->cmd_type = CMD_SCSI;
1882 c->Header.ReplyQueue = 0; /* unused in simple mode */
1883 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
1884 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
1885 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
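	/* The tag embeds this command's index into the command pool plus the
	 * direct-lookup flag, so the completion path can find the command by
	 * index instead of searching the outstanding-command list.
	 */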
1887 /* Fill in the request block... */
1889 c->Request.Timeout = 0;
1890 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
1891 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
1892 c->Request.CDBLen = cmd->cmd_len;
1893 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
1894 c->Request.Type.Type = TYPE_CMD;
1895 c->Request.Type.Attribute = ATTR_SIMPLE;
1896 switch (cmd->sc_data_direction) {
1898 c->Request.Type.Direction = XFER_WRITE;
1900 case DMA_FROM_DEVICE:
1901 c->Request.Type.Direction = XFER_READ;
1904 c->Request.Type.Direction = XFER_NONE;
1906 case DMA_BIDIRECTIONAL:
1907 /* This can happen if a buggy application does a scsi passthru
1908 * and sets both inlen and outlen to non-zero. ( see
1909 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
1912 c->Request.Type.Direction = XFER_RSVD;
1913 /* This is technically wrong, and hpsa controllers should
1914 * reject it with CMD_INVALID, which is the most correct
1915 * response, but non-fibre backends appear to let it
1916 * slide by, and give the same results as if this field
1917 * were set correctly. Either way is acceptable for
1918 * our purposes here.
1924 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
1925 cmd->sc_data_direction);
1930 if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
1932 return SCSI_MLQUEUE_HOST_BUSY;
1934 enqueue_cmd_and_start_io(h, c);
1935 /* the cmd'll come back via intr handler in complete_scsi_command() */
1939 static void hpsa_unregister_scsi(struct ctlr_info *h)
1941 /* we are being forcibly unloaded, and may not refuse. */
1942 scsi_remove_host(h->scsi_host);
1943 scsi_host_put(h->scsi_host);
1944 h->scsi_host = NULL;
1947 static int hpsa_register_scsi(struct ctlr_info *h)
1951 hpsa_update_scsi_devices(h, -1);
1952 rc = hpsa_scsi_detect(h);
1954 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
1955 " hpsa_scsi_detect(), rc is %d\n", rc);
1959 static int wait_for_device_to_become_ready(struct ctlr_info *h,
1960 unsigned char lunaddr[])
1964 int waittime = 1; /* seconds */
1965 struct CommandList *c;
1967 c = cmd_special_alloc(h);
1969 dev_warn(&h->pdev->dev, "out of memory in "
1970 "wait_for_device_to_become_ready.\n");
1974 /* Send test unit ready until device ready, or give up. */
1975 while (count < HPSA_TUR_RETRY_LIMIT) {
1977 /* Wait for a bit. Do this first, because if we send
1978 * the TUR right away, the reset will just abort it.
1980 msleep(1000 * waittime);
1983 /* Increase wait time with each try, up to a point. */
1984 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
1985 waittime = waittime * 2;
1987 /* Send the Test Unit Ready */
1988 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
1989 hpsa_scsi_do_simple_cmd_core(h, c);
1990 /* no unmap needed here because no data xfer. */
1992 if (c->err_info->CommandStatus == CMD_SUCCESS)
1995 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
1996 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
1997 (c->err_info->SenseInfo[2] == NO_SENSE ||
1998 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
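		/* (The check above lets a CHECK CONDITION carrying NO SENSE
		 * or UNIT ATTENTION count as "ready" too, since those are
		 * expected right after a reset.)
		 */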
2001 dev_warn(&h->pdev->dev, "waiting %d secs "
2002 "for device to become ready.\n", waittime);
2003 rc = 1; /* device not ready. */
2007 dev_warn(&h->pdev->dev, "giving up on device.\n");
2009 dev_warn(&h->pdev->dev, "device is ready.\n");
2011 cmd_special_free(h, c);
2015 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
2016 * complaining. Doing a host- or bus-reset can't do anything good here.
2018 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2021 struct ctlr_info *h;
2022 struct hpsa_scsi_dev_t *dev;
2024 /* find the controller to which the command to be aborted was sent */
2025 h = sdev_to_hba(scsicmd->device);
2026 if (h == NULL) /* paranoia */
2028 dev_warn(&h->pdev->dev, "resetting drive\n");
2030 dev = scsicmd->device->hostdata;
2032 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2033 "device lookup failed.\n");
2036 /* send a reset to the SCSI LUN which the command was sent to */
2037 rc = hpsa_send_reset(h, dev->scsi3addr);
2038 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2041 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2046 * For operations that cannot sleep, a command block is allocated at init,
2047 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2048 * which ones are free or in use. Lock must be held when calling this.
2049 * cmd_free() is the complement.
2051 static struct CommandList *cmd_alloc(struct ctlr_info *h)
2053 struct CommandList *c;
2055 union u64bit temp64;
2056 dma_addr_t cmd_dma_handle, err_dma_handle;
2059 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2060 if (i == h->nr_cmds)
2062 } while (test_and_set_bit
2063 (i & (BITS_PER_LONG - 1),
2064 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
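	/* i / BITS_PER_LONG selects the word in cmd_pool_bits and
	 * i & (BITS_PER_LONG - 1) the bit within it; the do/while retries
	 * the search if that bit turned out to be set already.
	 */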
2065 c = h->cmd_pool + i;
2066 memset(c, 0, sizeof(*c));
2067 cmd_dma_handle = h->cmd_pool_dhandle
2069 c->err_info = h->errinfo_pool + i;
2070 memset(c->err_info, 0, sizeof(*c->err_info));
2071 err_dma_handle = h->errinfo_pool_dhandle
2072 + i * sizeof(*c->err_info);
2077 INIT_HLIST_NODE(&c->list);
2078 c->busaddr = (u32) cmd_dma_handle;
2079 temp64.val = (u64) err_dma_handle;
2080 c->ErrDesc.Addr.lower = temp64.val32.lower;
2081 c->ErrDesc.Addr.upper = temp64.val32.upper;
2082 c->ErrDesc.Len = sizeof(*c->err_info);
2088 /* For operations that can wait for kmalloc to possibly sleep,
2089 * this routine can be called. Lock need not be held to call
2090 * cmd_special_alloc. cmd_special_free() is the complement.
2092 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2094 struct CommandList *c;
2095 union u64bit temp64;
2096 dma_addr_t cmd_dma_handle, err_dma_handle;
2098 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2101 memset(c, 0, sizeof(*c));
2105 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2108 if (c->err_info == NULL) {
2109 pci_free_consistent(h->pdev,
2110 sizeof(*c), c, cmd_dma_handle);
2113 memset(c->err_info, 0, sizeof(*c->err_info));
2115 INIT_HLIST_NODE(&c->list);
2116 c->busaddr = (u32) cmd_dma_handle;
2117 temp64.val = (u64) err_dma_handle;
2118 c->ErrDesc.Addr.lower = temp64.val32.lower;
2119 c->ErrDesc.Addr.upper = temp64.val32.upper;
2120 c->ErrDesc.Len = sizeof(*c->err_info);
2126 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2130 i = c - h->cmd_pool;
2131 clear_bit(i & (BITS_PER_LONG - 1),
2132 h->cmd_pool_bits + (i / BITS_PER_LONG));
2136 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2138 union u64bit temp64;
2140 temp64.val32.lower = c->ErrDesc.Addr.lower;
2141 temp64.val32.upper = c->ErrDesc.Addr.upper;
2142 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2143 c->err_info, (dma_addr_t) temp64.val);
2144 pci_free_consistent(h->pdev, sizeof(*c),
2145 c, (dma_addr_t) c->busaddr);
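/*
 * Illustrative usage sketch, not driver code: cmd_special_alloc() pairs with
 * cmd_special_free() above, and both the command and its error block live in
 * DMA-coherent memory, so sleeping paths (the ioctls and the cache flush at
 * shutdown later in this file) follow this pattern:
 *
 *	c = cmd_special_alloc(h);
 *	if (c) {
 *		... fill in c->Request, e.g. via fill_cmd() ...
 *		hpsa_scsi_do_simple_cmd_core(h, c);
 *		... inspect c->err_info ...
 *		cmd_special_free(h, c);
 *	}
 */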
2148 #ifdef CONFIG_COMPAT
2150 static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
2155 ret = hpsa_ioctl(dev, cmd, arg);
2160 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
2161 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2162 int cmd, void *arg);
2164 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2167 case CCISS_GETPCIINFO:
2168 case CCISS_GETINTINFO:
2169 case CCISS_SETINTINFO:
2170 case CCISS_GETNODENAME:
2171 case CCISS_SETNODENAME:
2172 case CCISS_GETHEARTBEAT:
2173 case CCISS_GETBUSTYPES:
2174 case CCISS_GETFIRMVER:
2175 case CCISS_GETDRIVVER:
2176 case CCISS_REVALIDVOLS:
2177 case CCISS_DEREGDISK:
2178 case CCISS_REGNEWDISK:
2180 case CCISS_RESCANDISK:
2181 case CCISS_GETLUNINFO:
2182 return do_ioctl(dev, cmd, arg);
2184 case CCISS_PASSTHRU32:
2185 return hpsa_ioctl32_passthru(dev, cmd, arg);
2186 case CCISS_BIG_PASSTHRU32:
2187 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2190 return -ENOIOCTLCMD;
2194 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2196 IOCTL32_Command_struct __user *arg32 =
2197 (IOCTL32_Command_struct __user *) arg;
2198 IOCTL_Command_struct arg64;
2199 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2204 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2205 sizeof(arg64.LUN_info));
2206 err |= copy_from_user(&arg64.Request, &arg32->Request,
2207 sizeof(arg64.Request));
2208 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2209 sizeof(arg64.error_info));
2210 err |= get_user(arg64.buf_size, &arg32->buf_size);
2211 err |= get_user(cp, &arg32->buf);
2212 arg64.buf = compat_ptr(cp);
2213 err |= copy_to_user(p, &arg64, sizeof(arg64));
2218 err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2221 err |= copy_in_user(&arg32->error_info, &p->error_info,
2222 sizeof(arg32->error_info));
2228 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2231 BIG_IOCTL32_Command_struct __user *arg32 =
2232 (BIG_IOCTL32_Command_struct __user *) arg;
2233 BIG_IOCTL_Command_struct arg64;
2234 BIG_IOCTL_Command_struct __user *p =
2235 compat_alloc_user_space(sizeof(arg64));
2240 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2241 sizeof(arg64.LUN_info));
2242 err |= copy_from_user(&arg64.Request, &arg32->Request,
2243 sizeof(arg64.Request));
2244 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2245 sizeof(arg64.error_info));
2246 err |= get_user(arg64.buf_size, &arg32->buf_size);
2247 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2248 err |= get_user(cp, &arg32->buf);
2249 arg64.buf = compat_ptr(cp);
2250 err |= copy_to_user(p, &arg64, sizeof(arg64));
2255 err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2258 err |= copy_in_user(&arg32->error_info, &p->error_info,
2259 sizeof(arg32->error_info));
2266 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2268 struct hpsa_pci_info pciinfo;
2272 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2273 pciinfo.bus = h->pdev->bus->number;
2274 pciinfo.dev_fn = h->pdev->devfn;
2275 pciinfo.board_id = h->board_id;
2276 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2281 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2283 DriverVer_type DriverVer;
2284 unsigned char vmaj, vmin, vsubmin;
2287 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2288 &vmaj, &vmin, &vsubmin);
2290 dev_info(&h->pdev->dev, "driver version string '%s' "
2291 "unrecognized.", HPSA_DRIVER_VERSION);
2296 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
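	/*
	 * Worked example: a version string of "1.2.3" packs as
	 * (1 << 16) | (2 << 8) | 3 = 0x010203.
	 */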
2299 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2304 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2306 IOCTL_Command_struct iocommand;
2307 struct CommandList *c;
2309 union u64bit temp64;
2313 if (!capable(CAP_SYS_RAWIO))
2315 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2317 if ((iocommand.buf_size < 1) &&
2318 (iocommand.Request.Type.Direction != XFER_NONE)) {
2321 if (iocommand.buf_size > 0) {
2322 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2326 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2327 /* Copy the data into the buffer we created */
2328 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
2333 memset(buff, 0, iocommand.buf_size);
2334 c = cmd_special_alloc(h);
2339 /* Fill in the command type */
2340 c->cmd_type = CMD_IOCTL_PEND;
2341 /* Fill in Command Header */
2342 c->Header.ReplyQueue = 0; /* unused in simple mode */
2343 if (iocommand.buf_size > 0) { /* buffer to fill */
2344 c->Header.SGList = 1;
2345 c->Header.SGTotal = 1;
2346 } else { /* no buffers to fill */
2347 c->Header.SGList = 0;
2348 c->Header.SGTotal = 0;
2350 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2351 /* use the kernel address of the cmd block for the tag */
2352 c->Header.Tag.lower = c->busaddr;
2354 /* Fill in Request block */
2355 memcpy(&c->Request, &iocommand.Request,
2356 sizeof(c->Request));
2358 /* Fill in the scatter gather information */
2359 if (iocommand.buf_size > 0) {
2360 temp64.val = pci_map_single(h->pdev, buff,
2361 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2362 c->SG[0].Addr.lower = temp64.val32.lower;
2363 c->SG[0].Addr.upper = temp64.val32.upper;
2364 c->SG[0].Len = iocommand.buf_size;
2365 c->SG[0].Ext = 0; /* we are not chaining*/
2367 hpsa_scsi_do_simple_cmd_core(h, c);
2368 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2369 check_ioctl_unit_attention(h, c);
2371 /* Copy the error information out */
2372 memcpy(&iocommand.error_info, c->err_info,
2373 sizeof(iocommand.error_info));
2374 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2376 cmd_special_free(h, c);
2380 if (iocommand.Request.Type.Direction == XFER_READ) {
2381 /* Copy the data out of the buffer we created */
2382 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2384 cmd_special_free(h, c);
2389 cmd_special_free(h, c);
2393 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2395 BIG_IOCTL_Command_struct *ioc;
2396 struct CommandList *c;
2397 unsigned char **buff = NULL;
2398 int *buff_size = NULL;
2399 union u64bit temp64;
2405 BYTE __user *data_ptr;
2409 if (!capable(CAP_SYS_RAWIO))
2411 ioc = (BIG_IOCTL_Command_struct *)
2412 kmalloc(sizeof(*ioc), GFP_KERNEL);
2417 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2421 if ((ioc->buf_size < 1) &&
2422 (ioc->Request.Type.Direction != XFER_NONE)) {
2426 /* Check kmalloc limits using all SGs */
2427 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2431 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2435 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2440 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2445 left = ioc->buf_size;
2446 data_ptr = ioc->buf;
2448 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2449 buff_size[sg_used] = sz;
2450 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2451 if (buff[sg_used] == NULL) {
2455 if (ioc->Request.Type.Direction == XFER_WRITE) {
2456 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2461 memset(buff[sg_used], 0, sz);
2466 c = cmd_special_alloc(h);
2471 c->cmd_type = CMD_IOCTL_PEND;
2472 c->Header.ReplyQueue = 0;
2474 if (ioc->buf_size > 0) {
2475 c->Header.SGList = sg_used;
2476 c->Header.SGTotal = sg_used;
2478 c->Header.SGList = 0;
2479 c->Header.SGTotal = 0;
2481 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2482 c->Header.Tag.lower = c->busaddr;
2483 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2484 if (ioc->buf_size > 0) {
2486 for (i = 0; i < sg_used; i++) {
2487 temp64.val = pci_map_single(h->pdev, buff[i],
2488 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2489 c->SG[i].Addr.lower = temp64.val32.lower;
2490 c->SG[i].Addr.upper = temp64.val32.upper;
2491 c->SG[i].Len = buff_size[i];
2492 /* we are not chaining */
2496 hpsa_scsi_do_simple_cmd_core(h, c);
2497 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2498 check_ioctl_unit_attention(h, c);
2499 /* Copy the error information out */
2500 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2501 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2502 cmd_special_free(h, c);
2506 if (ioc->Request.Type.Direction == XFER_READ) {
2507 /* Copy the data out of the buffer we created */
2508 BYTE __user *ptr = ioc->buf;
2509 for (i = 0; i < sg_used; i++) {
2510 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2511 cmd_special_free(h, c);
2515 ptr += buff_size[i];
2518 cmd_special_free(h, c);
2522 for (i = 0; i < sg_used; i++)
2531 static void check_ioctl_unit_attention(struct ctlr_info *h,
2532 struct CommandList *c)
2534 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2535 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2536 (void) check_for_unit_attention(h, c);
2541 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2543 struct ctlr_info *h;
2544 void __user *argp = (void __user *)arg;
2546 h = sdev_to_hba(dev);
2549 case CCISS_DEREGDISK:
2550 case CCISS_REGNEWDISK:
2552 hpsa_update_scsi_devices(h, dev->host->host_no);
2554 case CCISS_GETPCIINFO:
2555 return hpsa_getpciinfo_ioctl(h, argp);
2556 case CCISS_GETDRIVVER:
2557 return hpsa_getdrivver_ioctl(h, argp);
2558 case CCISS_PASSTHRU:
2559 return hpsa_passthru_ioctl(h, argp);
2560 case CCISS_BIG_PASSTHRU:
2561 return hpsa_big_passthru_ioctl(h, argp);
2567 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2568 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2571 int pci_dir = XFER_NONE;
2573 c->cmd_type = CMD_IOCTL_PEND;
2574 c->Header.ReplyQueue = 0;
2575 if (buff != NULL && size > 0) {
2576 c->Header.SGList = 1;
2577 c->Header.SGTotal = 1;
2579 c->Header.SGList = 0;
2580 c->Header.SGTotal = 0;
2582 c->Header.Tag.lower = c->busaddr;
2583 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2585 c->Request.Type.Type = cmd_type;
2586 if (cmd_type == TYPE_CMD) {
2589 /* are we trying to read a vital product page */
2590 if (page_code != 0) {
2591 c->Request.CDB[1] = 0x01;
2592 c->Request.CDB[2] = page_code;
2594 c->Request.CDBLen = 6;
2595 c->Request.Type.Attribute = ATTR_SIMPLE;
2596 c->Request.Type.Direction = XFER_READ;
2597 c->Request.Timeout = 0;
2598 c->Request.CDB[0] = HPSA_INQUIRY;
2599 c->Request.CDB[4] = size & 0xFF;
2601 case HPSA_REPORT_LOG:
2602 case HPSA_REPORT_PHYS:
2603 /* Talking to the controller, so it's a physical command:
2604 mode = 00, target = 0. Nothing to write.
2606 c->Request.CDBLen = 12;
2607 c->Request.Type.Attribute = ATTR_SIMPLE;
2608 c->Request.Type.Direction = XFER_READ;
2609 c->Request.Timeout = 0;
2610 c->Request.CDB[0] = cmd;
2611 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2612 c->Request.CDB[7] = (size >> 16) & 0xFF;
2613 c->Request.CDB[8] = (size >> 8) & 0xFF;
2614 c->Request.CDB[9] = size & 0xFF;
2617 case HPSA_READ_CAPACITY:
2618 c->Request.CDBLen = 10;
2619 c->Request.Type.Attribute = ATTR_SIMPLE;
2620 c->Request.Type.Direction = XFER_READ;
2621 c->Request.Timeout = 0;
2622 c->Request.CDB[0] = cmd;
2624 case HPSA_CACHE_FLUSH:
2625 c->Request.CDBLen = 12;
2626 c->Request.Type.Attribute = ATTR_SIMPLE;
2627 c->Request.Type.Direction = XFER_WRITE;
2628 c->Request.Timeout = 0;
2629 c->Request.CDB[0] = BMIC_WRITE;
2630 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2632 case TEST_UNIT_READY:
2633 c->Request.CDBLen = 6;
2634 c->Request.Type.Attribute = ATTR_SIMPLE;
2635 c->Request.Type.Direction = XFER_NONE;
2636 c->Request.Timeout = 0;
2639 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2643 } else if (cmd_type == TYPE_MSG) {
2646 case HPSA_DEVICE_RESET_MSG:
2647 c->Request.CDBLen = 16;
2648 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2649 c->Request.Type.Attribute = ATTR_SIMPLE;
2650 c->Request.Type.Direction = XFER_NONE;
2651 c->Request.Timeout = 0; /* Don't time out */
2652 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
2653 c->Request.CDB[1] = 0x03; /* Reset target above */
2654 /* If bytes 4-7 are zero, it means reset the */
2656 c->Request.CDB[4] = 0x00;
2657 c->Request.CDB[5] = 0x00;
2658 c->Request.CDB[6] = 0x00;
2659 c->Request.CDB[7] = 0x00;
2663 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2668 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2672 switch (c->Request.Type.Direction) {
2674 pci_dir = PCI_DMA_FROMDEVICE;
2677 pci_dir = PCI_DMA_TODEVICE;
2680 pci_dir = PCI_DMA_NONE;
2683 pci_dir = PCI_DMA_BIDIRECTIONAL;
2686 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2692 * Map (physical) PCI mem into (virtual) kernel space
2694 static void __iomem *remap_pci_mem(ulong base, ulong size)
2696 ulong page_base = ((ulong) base) & PAGE_MASK;
2697 ulong page_offs = ((ulong) base) - page_base;
2698 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2700 return page_remapped ? (page_remapped + page_offs) : NULL;
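/*
 * Worked example (illustrative, assuming a 4K PAGE_SIZE):
 * remap_pci_mem(0xfe001234, 0x250) computes page_base = 0xfe001000 and
 * page_offs = 0x234, ioremaps 0x484 bytes starting at the page boundary,
 * and returns the mapping plus 0x234 so the caller sees the exact address
 * it asked for.
 */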
2703 /* Takes cmds off the submission queue and sends them to the hardware,
2704 * then puts them on the queue of cmds waiting for completion.
2706 static void start_io(struct ctlr_info *h)
2708 struct CommandList *c;
2710 while (!hlist_empty(&h->reqQ)) {
2711 c = hlist_entry(h->reqQ.first, struct CommandList, list);
2712 /* can't do anything if fifo is full */
2713 if ((h->access.fifo_full(h))) {
2714 dev_warn(&h->pdev->dev, "fifo full\n");
2718 /* Get the first entry from the Request Q */
2722 /* Tell the controller execute command */
2723 h->access.submit_command(h, c);
2725 /* Put job onto the completed Q */
2730 static inline unsigned long get_next_completion(struct ctlr_info *h)
2732 return h->access.command_completed(h);
2735 static inline bool interrupt_pending(struct ctlr_info *h)
2737 return h->access.intr_pending(h);
2740 static inline long interrupt_not_for_us(struct ctlr_info *h)
2742 return !(h->msi_vector || h->msix_vector) &&
2743 ((h->access.intr_pending(h) == 0) ||
2744 (h->interrupts_enabled == 0));
2747 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
2750 if (unlikely(tag_index >= h->nr_cmds)) {
2751 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2757 static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
2760 if (likely(c->cmd_type == CMD_SCSI))
2761 complete_scsi_command(c, 0, raw_tag);
2762 else if (c->cmd_type == CMD_IOCTL_PEND)
2763 complete(c->waiting);
2766 static inline u32 hpsa_tag_contains_index(u32 tag)
2768 #define DIRECT_LOOKUP_BIT 0x10
2769 return tag & DIRECT_LOOKUP_BIT;
2772 static inline u32 hpsa_tag_to_index(u32 tag)
2774 #define DIRECT_LOOKUP_SHIFT 5
2775 return tag >> DIRECT_LOOKUP_SHIFT;
2778 static inline u32 hpsa_tag_discard_error_bits(u32 tag)
2780 #define HPSA_ERROR_BITS 0x03
2781 return tag & ~HPSA_ERROR_BITS;
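/*
 * Illustrative sketch, not part of the driver: decoding a completed tag with
 * the helpers above.  For example, raw_tag 0x000000F2 has its two low error
 * bits stripped to give 0xF0; DIRECT_LOOKUP_BIT (0x10) marks it as an
 * indexed tag, and shifting by DIRECT_LOOKUP_SHIFT recovers command pool
 * index 7.
 */
static inline u32 example_decode_raw_tag(u32 raw_tag)
{
	u32 tag = hpsa_tag_discard_error_bits(raw_tag);

	if (!hpsa_tag_contains_index(tag))
		return (u32) -1; /* not indexed; caller must search the cmpQ */
	return hpsa_tag_to_index(tag);
}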
2784 /* process completion of an indexed ("direct lookup") command */
2785 static inline u32 process_indexed_cmd(struct ctlr_info *h,
2789 struct CommandList *c;
2791 tag_index = hpsa_tag_to_index(raw_tag);
2792 if (bad_tag(h, tag_index, raw_tag))
2793 return next_command(h);
2794 c = h->cmd_pool + tag_index;
2795 finish_cmd(c, raw_tag);
2796 return next_command(h);
2799 /* process completion of a non-indexed command */
2800 static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2804 struct CommandList *c = NULL;
2805 struct hlist_node *tmp;
2807 tag = hpsa_tag_discard_error_bits(raw_tag);
2808 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2809 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
2810 finish_cmd(c, raw_tag);
2811 return next_command(h);
2814 bad_tag(h, h->nr_cmds + 1, raw_tag);
2815 return next_command(h);
2818 static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2820 struct ctlr_info *h = dev_id;
2821 unsigned long flags;
2824 if (interrupt_not_for_us(h))
2826 spin_lock_irqsave(&h->lock, flags);
2827 raw_tag = get_next_completion(h);
2828 while (raw_tag != FIFO_EMPTY) {
2829 if (hpsa_tag_contains_index(raw_tag))
2830 raw_tag = process_indexed_cmd(h, raw_tag);
2832 raw_tag = process_nonindexed_cmd(h, raw_tag);
2834 spin_unlock_irqrestore(&h->lock, flags);
2838 /* Send a message CDB to the firmware. */
2839 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2843 struct CommandListHeader CommandHeader;
2844 struct RequestBlock Request;
2845 struct ErrDescriptor ErrorDescriptor;
2847 struct Command *cmd;
2848 static const size_t cmd_sz = sizeof(*cmd) +
2849 sizeof(cmd->ErrorDescriptor);
2851 uint32_t paddr32, tag;
2852 void __iomem *vaddr;
2855 vaddr = pci_ioremap_bar(pdev, 0);
2859 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
2860 * CCISS commands, so they must be allocated from the lower 4GiB of memory.
2863 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2869 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
2875 /* This must fit, because of the 32-bit consistent DMA mask. Also,
2876 * although there's no guarantee, we assume that the address is at
2877 * least 4-byte aligned (most likely, it's page-aligned).
2881 cmd->CommandHeader.ReplyQueue = 0;
2882 cmd->CommandHeader.SGList = 0;
2883 cmd->CommandHeader.SGTotal = 0;
2884 cmd->CommandHeader.Tag.lower = paddr32;
2885 cmd->CommandHeader.Tag.upper = 0;
2886 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
2888 cmd->Request.CDBLen = 16;
2889 cmd->Request.Type.Type = TYPE_MSG;
2890 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
2891 cmd->Request.Type.Direction = XFER_NONE;
2892 cmd->Request.Timeout = 0; /* Don't time out */
2893 cmd->Request.CDB[0] = opcode;
2894 cmd->Request.CDB[1] = type;
2895 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
2896 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
2897 cmd->ErrorDescriptor.Addr.upper = 0;
2898 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
2900 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
2902 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
2903 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
2904 if (hpsa_tag_discard_error_bits(tag) == paddr32)
2906 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
2911 /* we leak the DMA buffer here ... no choice since the controller could
2912 * still complete the command.
2914 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
2915 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
2920 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
2922 if (tag & HPSA_ERROR_BIT) {
2923 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
2928 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
2933 #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
2934 #define hpsa_noop(p) hpsa_message(p, 3, 0)
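/*
 * For example, hpsa_noop(pdev) posts message opcode 3 to the controller and
 * polls until the controller echoes the command back; hpsa_init_one() below
 * uses it to confirm that a freshly reset controller is responding again.
 */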
2936 static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
2938 /* the #defines are stolen from drivers/pci/msi.h. */
2939 #define msi_control_reg(base) (base + PCI_MSI_FLAGS)
2940 #define PCI_MSIX_FLAGS_ENABLE (1 << 15)
2945 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
2947 pci_read_config_word(pdev, msi_control_reg(pos), &control);
2948 if (control & PCI_MSI_FLAGS_ENABLE) {
2949 dev_info(&pdev->dev, "resetting MSI\n");
2950 pci_write_config_word(pdev, msi_control_reg(pos),
2951 control & ~PCI_MSI_FLAGS_ENABLE);
2955 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
2957 pci_read_config_word(pdev, msi_control_reg(pos), &control);
2958 if (control & PCI_MSIX_FLAGS_ENABLE) {
2959 dev_info(&pdev->dev, "resetting MSI-X\n");
2960 pci_write_config_word(pdev, msi_control_reg(pos),
2961 control & ~PCI_MSIX_FLAGS_ENABLE);
2968 /* This does a hard reset of the controller using PCI power management
2971 static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
2973 u16 pmcsr, saved_config_space[32];
2976 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
2978 /* This is very nearly the same thing as
2980 * pci_save_state(pci_dev);
2981 * pci_set_power_state(pci_dev, PCI_D3hot);
2982 * pci_set_power_state(pci_dev, PCI_D0);
2983 * pci_restore_state(pci_dev);
2985 * but we can't use these nice canned kernel routines on
2986 * kexec, because they also check the MSI/MSI-X state in PCI
2987 * configuration space and do the wrong thing when it is
2988 * set/cleared. Also, the pci_save/restore_state functions
2989 * violate the ordering requirements for restoring the
2990 * configuration space from the CCISS document (see the
2991 * comment below). So we roll our own ....
2994 for (i = 0; i < 32; i++)
2995 pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
2997 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3000 "hpsa_reset_controller: PCI PM not supported\n");
3004 /* Quoting from the Open CISS Specification: "The Power
3005 * Management Control/Status Register (CSR) controls the power
3006 * state of the device. The normal operating state is D0,
3007 * CSR=00h. The software off state is D3, CSR=03h. To reset
3008 * the controller, place the interface device in D3 then to
3009 * D0, this causes a secondary PCI reset which will reset the
3013 /* enter the D3hot power management state */
3014 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3015 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3017 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3021 /* enter the D0 power management state */
3022 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3024 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3028 /* Restore the PCI configuration space. The Open CISS
3029 * Specification says, "Restore the PCI Configuration
3030 * Registers, offsets 00h through 60h. It is important to
3031 * restore the command register, 16-bits at offset 04h,
3032 * last. Do not restore the configuration status register,
3033 * 16-bits at offset 06h." Note that the offset is 2*i.
3035 for (i = 0; i < 32; i++) {
3036 if (i == 2 || i == 3)
3038 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
3041 pci_write_config_word(pdev, 4, saved_config_space[2]);
3047 * We cannot read the structure directly; for portability we must use
3049 * This is for debug only.
3052 static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3057 dev_info(dev, "Controller Configuration information\n");
3058 dev_info(dev, "------------------------------------\n");
3059 for (i = 0; i < 4; i++)
3060 temp_name[i] = readb(&(tb->Signature[i]));
3061 temp_name[4] = '\0';
3062 dev_info(dev, " Signature = %s\n", temp_name);
3063 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3064 dev_info(dev, " Transport methods supported = 0x%x\n",
3065 readl(&(tb->TransportSupport)));
3066 dev_info(dev, " Transport methods active = 0x%x\n",
3067 readl(&(tb->TransportActive)));
3068 dev_info(dev, " Requested transport Method = 0x%x\n",
3069 readl(&(tb->HostWrite.TransportRequest)));
3070 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3071 readl(&(tb->HostWrite.CoalIntDelay)));
3072 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3073 readl(&(tb->HostWrite.CoalIntCount)));
3074 dev_info(dev, " Max outstanding commands = %d\n",
3075 readl(&(tb->CmdsOutMax)));
3076 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3077 for (i = 0; i < 16; i++)
3078 temp_name[i] = readb(&(tb->ServerName[i]));
3079 temp_name[16] = '\0';
3080 dev_info(dev, " Server Name = %s\n", temp_name);
3081 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3082 readl(&(tb->HeartBeat)));
3084 #endif /* HPSA_DEBUG */
3086 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3088 int i, offset, mem_type, bar_type;
3090 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3093 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3094 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3095 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3098 mem_type = pci_resource_flags(pdev, i) &
3099 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3101 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3102 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3103 offset += 4; /* 32 bit */
3105 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3108 default: /* reserved in PCI 2.2 */
3109 dev_warn(&pdev->dev,
3110 "base address is invalid\n");
3115 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3121 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3122 * controllers that are capable. If not, we use IO-APIC mode.
3125 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
3126 struct pci_dev *pdev, u32 board_id)
3128 #ifdef CONFIG_PCI_MSI
3130 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3134 /* Some boards advertise MSI but don't really support it */
3135 if ((board_id == 0x40700E11) ||
3136 (board_id == 0x40800E11) ||
3137 (board_id == 0x40820E11) || (board_id == 0x40830E11))
3138 goto default_int_mode;
3139 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
3140 dev_info(&pdev->dev, "MSIX\n");
3141 err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
3143 h->intr[0] = hpsa_msix_entries[0].vector;
3144 h->intr[1] = hpsa_msix_entries[1].vector;
3145 h->intr[2] = hpsa_msix_entries[2].vector;
3146 h->intr[3] = hpsa_msix_entries[3].vector;
3151 dev_warn(&pdev->dev, "only %d MSI-X vectors "
3152 "available\n", err);
3153 goto default_int_mode;
3155 dev_warn(&pdev->dev, "MSI-X init failed %d\n",
3157 goto default_int_mode;
3160 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
3161 dev_info(&pdev->dev, "MSI\n");
3162 if (!pci_enable_msi(pdev))
3165 dev_warn(&pdev->dev, "MSI init failed\n");
3168 #endif /* CONFIG_PCI_MSI */
3169 /* if we get here we're going to use the default interrupt mode */
3170 h->intr[PERF_MODE_INT] = pdev->irq;
3173 static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3175 ushort subsystem_vendor_id, subsystem_device_id, command;
3176 u32 board_id, scratchpad = 0;
3179 u64 cfg_base_addr_index;
3181 int i, prod_index, err;
3183 subsystem_vendor_id = pdev->subsystem_vendor;
3184 subsystem_device_id = pdev->subsystem_device;
3185 board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) |
3186 subsystem_vendor_id);
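	/*
	 * Worked example: subsystem vendor 0x103C with subsystem device 0x3225
	 * yields board_id 0x3225103C, the P600 special-cased further down.
	 */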
3188 for (i = 0; i < ARRAY_SIZE(products); i++)
3189 if (board_id == products[i].board_id)
3194 if (prod_index == ARRAY_SIZE(products)) {
3196 if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
3198 dev_warn(&pdev->dev, "unrecognized board ID:"
3199 " 0x%08lx, ignoring.\n",
3200 (unsigned long) board_id);
3204 /* check to see if controller has been disabled
3205 * BEFORE trying to enable it
3207 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
3208 if (!(command & 0x02)) {
3209 dev_warn(&pdev->dev, "controller appears to be disabled\n");
3213 err = pci_enable_device(pdev);
3215 dev_warn(&pdev->dev, "unable to enable PCI device\n");
3219 err = pci_request_regions(pdev, "hpsa");
3221 dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
3225 /* If the kernel supports MSI/MSI-X we will try to enable that,
3226 * else we use the IO-APIC interrupt assigned to us by system ROM.
3228 hpsa_interrupt_mode(h, pdev, board_id);
3230 /* find the memory BAR */
3231 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3232 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
3235 if (i == DEVICE_COUNT_RESOURCE) {
3236 dev_warn(&pdev->dev, "no memory BAR found\n");
3238 goto err_out_free_res;
3241 h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
3245 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3247 /* Wait for the board to become ready. */
3248 for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
3249 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
3250 if (scratchpad == HPSA_FIRMWARE_READY)
3252 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3254 if (scratchpad != HPSA_FIRMWARE_READY) {
3255 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3257 goto err_out_free_res;
3260 /* get the address index number */
3261 cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
3262 cfg_base_addr &= (u32) 0x0000ffff;
3263 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3264 if (cfg_base_addr_index == -1) {
3265 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3267 goto err_out_free_res;
3270 cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
3271 h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3272 cfg_base_addr_index) + cfg_offset,
3273 sizeof(h->cfgtable));
3274 /* Find performant mode table. */
3275 trans_offset = readl(&(h->cfgtable->TransMethodOffset));
3276 h->transtable = remap_pci_mem(pci_resource_start(pdev,
3277 cfg_base_addr_index)+cfg_offset+trans_offset,
3278 sizeof(*h->transtable));
3280 h->board_id = board_id;
3281 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3282 h->product_name = products[prod_index].product_name;
3283 h->access = *(products[prod_index].access);
3284 /* Allow room for some ioctls */
3285 h->nr_cmds = h->max_commands - 4;
3287 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3288 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3289 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3290 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3291 dev_warn(&pdev->dev, "not a valid CISS config table\n");
3293 goto err_out_free_res;
3297 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3299 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3301 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3305 /* Disabling DMA prefetch for the P600
3306 * An ASIC bug may result in a prefetch beyond
3309 if (board_id == 0x3225103C) {
3311 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3312 dma_prefetch |= 0x8000;
3313 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3316 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3317 /* Update the field, and then ring the doorbell */
3318 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3319 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3321 /* under certain very rare conditions, this can take a while.
3322 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3323 * as we enter this code.)
3325 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3326 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3328 /* delay and try again */
3333 print_cfg_table(&pdev->dev, h->cfgtable);
3334 #endif /* HPSA_DEBUG */
3336 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3337 dev_warn(&pdev->dev, "unable to get board into simple mode\n");
3339 goto err_out_free_res;
3345 * Deliberately omit pci_disable_device(): it does something nasty to
3346 * Smart Array controllers that pci_enable_device does not undo
3348 pci_release_regions(pdev);
3352 static int __devinit hpsa_init_one(struct pci_dev *pdev,
3353 const struct pci_device_id *ent)
3357 struct ctlr_info *h;
3359 if (number_of_controllers == 0)
3360 printk(KERN_INFO DRIVER_NAME "\n");
3361 if (reset_devices) {
3362 /* Reset the controller with a PCI power-cycle */
3363 if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
3366 /* Some devices (notably the HP Smart Array 5i Controller)
3367 need a little pause here */
3368 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3370 /* Now try to get the controller to respond to a no-op */
3371 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3372 if (hpsa_noop(pdev) == 0)
3375 dev_warn(&pdev->dev, "no-op failed%s\n",
3376 (i < 11 ? "; re-trying" : ""));
3380 /* Command structures must be aligned on a 32-byte boundary because
3381 * the 5 lower bits of the address are used by the hardware and by
3382 * the driver. See comments in hpsa.h for more info.
3384 #define COMMANDLIST_ALIGNMENT 32
3385 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
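	/*
	 * Keeping sizeof(struct CommandList) a multiple of 32 means each
	 * command pool entry's bus address has its five low bits clear; those
	 * are the bits used for the error status (0x03), the DIRECT_LOOKUP_BIT
	 * (0x10) and the DMA transfer size encoding mentioned in the comment
	 * above calc_bucket_map() below.
	 */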
3386 h = kzalloc(sizeof(*h), GFP_KERNEL);
3390 h->busy_initializing = 1;
3391 INIT_HLIST_HEAD(&h->cmpQ);
3392 INIT_HLIST_HEAD(&h->reqQ);
3393 mutex_init(&h->busy_shutting_down);
3394 init_completion(&h->scan_wait);
3395 rc = hpsa_pci_init(h, pdev);
3399 sprintf(h->devname, "hpsa%d", number_of_controllers);
3400 h->ctlr = number_of_controllers;
3401 number_of_controllers++;
3404 /* configure PCI DMA stuff */
3405 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3409 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3413 dev_err(&pdev->dev, "no suitable DMA available\n");
3418 /* make sure the board interrupts are off */
3419 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3420 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr,
3421 IRQF_DISABLED, h->devname, h);
3423 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3424 h->intr[PERF_MODE_INT], h->devname);
3428 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
3429 h->devname, pdev->device,
3430 h->intr[PERF_MODE_INT], dac ? "" : " not");
3433 kmalloc(((h->nr_cmds + BITS_PER_LONG -
3434 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3435 h->cmd_pool = pci_alloc_consistent(h->pdev,
3436 h->nr_cmds * sizeof(*h->cmd_pool),
3437 &(h->cmd_pool_dhandle));
3438 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3439 h->nr_cmds * sizeof(*h->errinfo_pool),
3440 &(h->errinfo_pool_dhandle));
3441 if ((h->cmd_pool_bits == NULL)
3442 || (h->cmd_pool == NULL)
3443 || (h->errinfo_pool == NULL)) {
3444 dev_err(&pdev->dev, "out of memory\n");
3448 spin_lock_init(&h->lock);
3450 pci_set_drvdata(pdev, h);
3451 memset(h->cmd_pool_bits, 0,
3452 ((h->nr_cmds + BITS_PER_LONG -
3453 1) / BITS_PER_LONG) * sizeof(unsigned long));
3457 /* Turn the interrupts on so we can service requests */
3458 h->access.set_intr_mask(h, HPSA_INTR_ON);
3460 hpsa_put_ctlr_into_performant_mode(h);
3461 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
3462 h->busy_initializing = 0;
3466 kfree(h->cmd_pool_bits);
3468 pci_free_consistent(h->pdev,
3469 h->nr_cmds * sizeof(struct CommandList),
3470 h->cmd_pool, h->cmd_pool_dhandle);
3471 if (h->errinfo_pool)
3472 pci_free_consistent(h->pdev,
3473 h->nr_cmds * sizeof(struct ErrorInfo),
3475 h->errinfo_pool_dhandle);
3476 free_irq(h->intr[PERF_MODE_INT], h);
3479 h->busy_initializing = 0;
3484 static void hpsa_flush_cache(struct ctlr_info *h)
3487 struct CommandList *c;
3489 flush_buf = kzalloc(4, GFP_KERNEL);
3493 c = cmd_special_alloc(h);
3495 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
3498 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
3499 RAID_CTLR_LUNID, TYPE_CMD);
3500 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
3501 if (c->err_info->CommandStatus != 0)
3502 dev_warn(&h->pdev->dev,
3503 "error flushing cache on controller\n");
3504 cmd_special_free(h, c);
3509 static void hpsa_shutdown(struct pci_dev *pdev)
3511 struct ctlr_info *h;
3513 h = pci_get_drvdata(pdev);
3514 /* Turn board interrupts off and send the flush cache command:
3515 * sendcmd will turn off interrupts and send the flush
3516 * so that all data in the battery-backed cache is written to disk.
3518 hpsa_flush_cache(h);
3519 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3520 free_irq(h->intr[PERF_MODE_INT], h);
3521 #ifdef CONFIG_PCI_MSI
3523 pci_disable_msix(h->pdev);
3524 else if (h->msi_vector)
3525 pci_disable_msi(h->pdev);
3526 #endif /* CONFIG_PCI_MSI */
3529 static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3531 struct ctlr_info *h;
3533 if (pci_get_drvdata(pdev) == NULL) {
3534 dev_err(&pdev->dev, "unable to remove device\n");
3537 h = pci_get_drvdata(pdev);
3538 mutex_lock(&h->busy_shutting_down);
3539 remove_from_scan_list(h);
3540 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
3541 hpsa_shutdown(pdev);
3543 pci_free_consistent(h->pdev,
3544 h->nr_cmds * sizeof(struct CommandList),
3545 h->cmd_pool, h->cmd_pool_dhandle);
3546 pci_free_consistent(h->pdev,
3547 h->nr_cmds * sizeof(struct ErrorInfo),
3548 h->errinfo_pool, h->errinfo_pool_dhandle);
3549 pci_free_consistent(h->pdev, h->reply_pool_size,
3550 h->reply_pool, h->reply_pool_dhandle);
3551 kfree(h->cmd_pool_bits);
3552 kfree(h->blockFetchTable);
3554 * Deliberately omit pci_disable_device(): it does something nasty to
3555 * Smart Array controllers that pci_enable_device does not undo
3557 pci_release_regions(pdev);
3558 pci_set_drvdata(pdev, NULL);
3559 mutex_unlock(&h->busy_shutting_down);
3563 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
3564 __attribute__((unused)) pm_message_t state)
3569 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
3574 static struct pci_driver hpsa_pci_driver = {
3576 .probe = hpsa_init_one,
3577 .remove = __devexit_p(hpsa_remove_one),
3578 .id_table = hpsa_pci_device_id, /* id_table */
3579 .shutdown = hpsa_shutdown,
3580 .suspend = hpsa_suspend,
3581 .resume = hpsa_resume,
3584 /* Fill in bucket_map[], given nsgs (the max number of
3585 * scatter gather elements supported) and bucket[],
3586 * which is an array of 8 integers. The bucket[] array
3587 * contains 8 different DMA transfer sizes (in 16
3588 * byte increments) which the controller uses to fetch
3589 * commands. This function fills in bucket_map[], which
3590 * maps a given number of scatter gather elements to one of
3591 * the 8 DMA transfer sizes. The point of it is to allow the
3592 * controller to only do as much DMA as needed to fetch the
3593 * command, with the DMA transfer size encoded in the lower
3594 * bits of the command address.
3596 static void calc_bucket_map(int bucket[], int num_buckets,
3597 int nsgs, int *bucket_map)
3601 /* even a command with 0 SGs requires 4 blocks */
3602 #define MINIMUM_TRANSFER_BLOCKS 4
3603 #define NUM_BUCKETS 8
3604 /* Note, bucket_map must have nsgs+1 entries. */
3605 for (i = 0; i <= nsgs; i++) {
3606 /* Compute size of a command with i SG entries */
3607 size = i + MINIMUM_TRANSFER_BLOCKS;
3608 b = num_buckets; /* Assume the biggest bucket */
3609 /* Find the bucket that is just big enough */
3610 for (j = 0; j < 8; j++) {
3611 if (bucket[j] >= size) {
3616 /* for a command with i SG entries, use bucket b. */
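/*
 * Worked example (illustrative): with the bft[] table used below
 * ({5, 6, 8, 10, 12, 20, 28, 35}), a command with 3 scatter-gather entries
 * needs 3 + MINIMUM_TRANSFER_BLOCKS = 7 blocks; the first bucket >= 7 is 8,
 * so such a command is fetched with the 16K transfer size.
 */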
3621 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
3625 /* 5 = 1 s/g entry or 4k
3626 * 6 = 2 s/g entry or 8k
3627 * 8 = 4 s/g entry or 16k
3628 * 10 = 6 s/g entry or 24k
3630 int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
3633 unsigned long register_value;
3635 trans_support = readl(&(h->cfgtable->TransportSupport));
3636 if (!(trans_support & PERFORMANT_MODE))
3639 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3640 h->max_sg_entries = 32;
3641 /* Performant mode ring buffer and supporting data structures */
3642 h->reply_pool_size = h->max_commands * sizeof(u64);
3643 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
3644 &(h->reply_pool_dhandle));
3646 /* Need a block fetch table for performant mode */
3647 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
3648 sizeof(u32)), GFP_KERNEL);
3650 if ((h->reply_pool == NULL)
3651 || (h->blockFetchTable == NULL))
3654 h->reply_pool_wraparound = 1; /* spec: init to 1 */
3656 /* Controller spec: zero out this buffer. */
3657 memset(h->reply_pool, 0, h->reply_pool_size);
3658 h->reply_pool_head = h->reply_pool;
3660 trans_offset = readl(&(h->cfgtable->TransMethodOffset));
3661 bft[7] = h->max_sg_entries + 4;
3662 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
3663 for (i = 0; i < 8; i++)
3664 writel(bft[i], &h->transtable->BlockFetch[i]);
3666 /* size of controller ring buffer */
3667 writel(h->max_commands, &h->transtable->RepQSize);
3668 writel(1, &h->transtable->RepQCount);
3669 writel(0, &h->transtable->RepQCtrAddrLow32);
3670 writel(0, &h->transtable->RepQCtrAddrHigh32);
3671 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
3672 writel(0, &h->transtable->RepQAddr0High32);
3673 writel(CFGTBL_Trans_Performant,
3674 &(h->cfgtable->HostWrite.TransportRequest));
3675 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3676 /* under certain very rare conditions, this can take a while.
3677 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3678 * as we enter this code.) */
3679 for (l = 0; l < MAX_CONFIG_WAIT; l++) {
3680 register_value = readl(h->vaddr + SA5_DOORBELL);
3681 if (!(register_value & CFGTBL_ChangeReq))
3683 /* delay and try again */
3684 set_current_state(TASK_INTERRUPTIBLE);
3685 schedule_timeout(10);
3687 register_value = readl(&(h->cfgtable->TransportActive));
3688 if (!(register_value & CFGTBL_Trans_Performant)) {
3689 dev_warn(&h->pdev->dev, "unable to get board into"
3690 " performant mode\n");
3694 /* Change the access methods to the performant access methods */
3695 h->access = SA5_performant_access;
3696 h->transMethod = CFGTBL_Trans_Performant;
3702 pci_free_consistent(h->pdev, h->reply_pool_size,
3703 h->reply_pool, h->reply_pool_dhandle);
3704 kfree(h->blockFetchTable);
3708 * This is it. Register the PCI driver information for the cards we control;
3709 * the OS will call our registered routines when it finds one of our cards.
3711 static int __init hpsa_init(void)
3714 /* Start the scan thread */
3715 hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
3716 if (IS_ERR(hpsa_scan_thread)) {
3717 err = PTR_ERR(hpsa_scan_thread);
3720 err = pci_register_driver(&hpsa_pci_driver);
3722 kthread_stop(hpsa_scan_thread);
3726 static void __exit hpsa_cleanup(void)
3728 pci_unregister_driver(&hpsa_pci_driver);
3729 kthread_stop(hpsa_scan_thread);
3732 module_init(hpsa_init);
3733 module_exit(hpsa_cleanup);