/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <asm/atomic.h>
#include <linux/kthread.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.1-3"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
        HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
                "Allow hpsa driver to access unknown HP Smart Array hardware");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
#define PCI_DEVICE_ID_HP_CISSF 0x333f
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x333F},
        {PCI_VENDOR_ID_HP,     PCI_ANY_ID,             PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324a103C, "Smart Array P712m", &SA5_access},
        {0x324b103C, "Smart Array P711m", &SA5_access},
        {0x3233103C, "StorageWorks P1210m", &SA5_access},
        {0x333F103C, "StorageWorks P1210m", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
        int cmd_type);

static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
                void (*done)(struct scsi_cmnd *));
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
        int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static ssize_t raid_level_show(struct device *dev,
        struct device_attribute *attr, char *buf);
static ssize_t lunid_show(struct device *dev,
        struct device_attribute *attr, char *buf);
static ssize_t unique_id_show(struct device *dev,
        struct device_attribute *attr, char *buf);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static ssize_t host_store_rescan(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count);
static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
        int nsgs, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);

static struct device_attribute *hpsa_sdev_attrs[] = {
        &dev_attr_raid_level,
        &dev_attr_lunid,
        &dev_attr_unique_id,
        NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
        &dev_attr_rescan,
        NULL,
};

static struct scsi_host_template hpsa_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = "hpsa",
        .proc_name              = "hpsa",
        .queuecommand           = hpsa_scsi_queue_command,
        .scan_start             = hpsa_scan_start,
        .scan_finished          = hpsa_scan_finished,
        .change_queue_depth     = hpsa_change_queue_depth,
        .this_id                = -1,
        .sg_tablesize           = MAXSGENTRIES,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_device_reset_handler = hpsa_eh_device_reset_handler,
        .ioctl                  = hpsa_ioctl,
        .slave_alloc            = hpsa_slave_alloc,
        .slave_destroy          = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = hpsa_compat_ioctl,
#endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
};

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
        unsigned long *priv = shost_priv(sdev->host);
        return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
        unsigned long *priv = shost_priv(sh);
        return (struct ctlr_info *) *priv;
}

static struct task_struct *hpsa_scan_thread;
static DEFINE_MUTEX(hpsa_scan_mutex);
static LIST_HEAD(hpsa_scan_q);
static int hpsa_scan_func(void *data);

/**
 * add_to_scan_list() - add controller to rescan queue
 * @h:                Pointer to the controller.
 *
 * Adds the controller to the rescan queue if not already on the queue.
 *
 * returns 1 if added to the queue, 0 if skipped (could be on the
 * queue already, or the controller could be initializing or shutting
 * down).
 **/
static int add_to_scan_list(struct ctlr_info *h)
{
        struct ctlr_info *test_h;
        int found = 0;
        int ret = 0;

        if (h->busy_initializing)
                return 0;

        /*
         * If we don't get the lock, it means the driver is unloading
         * and there's no point in scheduling a new scan.
         */
        if (!mutex_trylock(&h->busy_shutting_down))
                return 0;

        mutex_lock(&hpsa_scan_mutex);
        list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
                if (test_h == h) {
                        found = 1;
                        break;
                }
        }
        if (!found && !h->busy_scanning) {
                INIT_COMPLETION(h->scan_wait);
                list_add_tail(&h->scan_list, &hpsa_scan_q);
                ret = 1;
        }
        mutex_unlock(&hpsa_scan_mutex);
        mutex_unlock(&h->busy_shutting_down);

        return ret;
}

/**
 * remove_from_scan_list() - remove controller from rescan queue
 * @h:                     Pointer to the controller.
 *
 * Removes the controller from the rescan queue if present. Blocks if
 * the controller is currently conducting a rescan.  The controller
 * can be in one of three states:
 * 1. Doesn't need a scan
 * 2. On the scan list, but not scanning yet (we remove it)
 * 3. Busy scanning (and not on the list). In this case we want to wait for
 *    the scan to complete to make sure the scanning thread for this
 *    controller is completely idle.
 **/
static void remove_from_scan_list(struct ctlr_info *h)
{
        struct ctlr_info *test_h, *tmp_h;

        mutex_lock(&hpsa_scan_mutex);
        list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
                if (test_h == h) { /* state 2. */
                        list_del(&h->scan_list);
                        complete_all(&h->scan_wait);
                        mutex_unlock(&hpsa_scan_mutex);
                        return;
                }
        }
        if (h->busy_scanning) { /* state 3. */
                mutex_unlock(&hpsa_scan_mutex);
                wait_for_completion(&h->scan_wait);
        } else { /* state 1, nothing to do. */
                mutex_unlock(&hpsa_scan_mutex);
        }
}

/**
 * hpsa_scan_func() - kernel thread used to rescan controllers
 * @data:        Ignored.
 *
 * A kernel thread used to scan for drive topology changes on
 * controllers. The thread processes only one controller at a time
 * using a queue.  Controllers are added to the queue using
 * add_to_scan_list() and removed from the queue either when done
 * processing or via remove_from_scan_list().
 *
 * returns 0.
 **/
static int hpsa_scan_func(__attribute__((unused)) void *data)
{
        struct ctlr_info *h;
        int host_no;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                if (kthread_should_stop())
                        break;

                while (1) {
                        mutex_lock(&hpsa_scan_mutex);
                        if (list_empty(&hpsa_scan_q)) {
                                mutex_unlock(&hpsa_scan_mutex);
                                break;
                        }
                        h = list_entry(hpsa_scan_q.next, struct ctlr_info,
                                        scan_list);
                        list_del(&h->scan_list);
                        h->busy_scanning = 1;
                        mutex_unlock(&hpsa_scan_mutex);
                        host_no = h->scsi_host ? h->scsi_host->host_no : -1;
                        hpsa_scan_start(h->scsi_host);
                        complete_all(&h->scan_wait);
                        mutex_lock(&hpsa_scan_mutex);
                        h->busy_scanning = 0;
                        mutex_unlock(&hpsa_scan_mutex);
                }
        }
        return 0;
}

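/* Check a completed command for a UNIT ATTENTION sense key.  Returns 1
 * (after logging the ASC-specific reason) if the command failed with a
 * unit attention, 0 otherwise, so callers know the command may be
 * retried.
 */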
static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c)
{
        if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
                return 0;

        switch (c->err_info->SenseInfo[12]) {
        case STATE_CHANGED:
                dev_warn(&h->pdev->dev, "hpsa%d: a state change "
                        "detected, command retried\n", h->ctlr);
                break;
        case LUN_FAILED:
                dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
                        "detected, action required\n", h->ctlr);
                break;
        case REPORT_LUNS_CHANGED:
                dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
                        "changed\n", h->ctlr);
        /*
         * Here, we could call add_to_scan_list and wake up the scan thread,
         * except that it's quite likely that we will get more than one
         * REPORT_LUNS_CHANGED condition in quick succession, which means
         * that those which occur after the first one will likely happen
         * *during* the hpsa_scan_thread's rescan.  And the rescan code is not
         * robust enough to restart in the middle, undoing what it has already
         * done, and it's not clear that it's even possible to do this, since
         * part of what it does is notify the SCSI mid layer, which starts
         * doing its own i/o to read partition tables and so on, and the
         * driver doesn't have visibility to know what might need undoing.
         * In any event, even if it is possible, it would be horribly
         * complicated to get right, so we just don't do it for now.
         *
         * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
         */
                break;
        case POWER_OR_RESET:
                dev_warn(&h->pdev->dev, "hpsa%d: a power on "
                        "or device reset detected\n", h->ctlr);
                break;
        case UNIT_ATTENTION_CLEARED:
                dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
                        "cleared by another initiator\n", h->ctlr);
                break;
        default:
                dev_warn(&h->pdev->dev, "hpsa%d: unknown "
                        "unit attention detected\n", h->ctlr);
                break;
        }
        return 1;
}

static ssize_t host_store_rescan(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        h = shost_to_hba(shost);
        if (add_to_scan_list(h)) {
                wake_up_process(hpsa_scan_thread);
                wait_for_completion_interruptible(&h->scan_wait);
        }
        return count;
}

/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct hlist_head *list, struct CommandList *c)
{
        hlist_add_head(&c->list, list);
}

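/* In performant mode, completed command tags are consumed from a reply
 * ring in host memory.  Bit 0 of each entry toggles on every pass around
 * the ring, so an entry whose low bit matches the current wraparound
 * state is a fresh completion; anything else means the ring is empty.
 */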
static inline u32 next_command(struct ctlr_info *h)
{
        u32 a;

        if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
                return h->access.command_completed(h);

        if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
                a = *(h->reply_pool_head); /* Next cmd in ring buffer */
                (h->reply_pool_head)++;
                h->commands_outstanding--;
        } else {
                a = FIFO_EMPTY;
        }
        /* Check for wraparound */
        if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
                h->reply_pool_head = h->reply_pool;
                h->reply_pool_wraparound ^= 1;
        }
        return a;
}

/* set_performant_mode: Modify the tag for performant mode:
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
        if (likely(h->transMethod == CFGTBL_Trans_Performant))
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}

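/* Queue a command on the controller's request queue and kick off
 * submission, all under the controller lock.
 */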
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c)
{
        unsigned long flags;

        set_performant_mode(h, c);
        spin_lock_irqsave(&h->lock, flags);
        addQ(&h->reqQ, c);
        h->Qdepth++;
        start_io(h);
        spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
        if (WARN_ON(hlist_unhashed(&c->list)))
                return;
        hlist_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
        return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

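/* Logical (RAID volume) addresses are flagged by bits 7:6 of byte 3
 * of the 8-byte LUN address being 01.
 */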
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
        return (scsi3addr[3] & 0xC0) == 0x40;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
        if (!h->hba_inquiry_data)
                return 0;
        if ((h->hba_inquiry_data[2] & 0x07) == 5)
                return 1;
        return 0;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
        "UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        ssize_t l = 0;
        unsigned char rlevel;
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }

        /* Is this even a logical drive? */
        if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
                spin_unlock_irqrestore(&h->lock, flags);
                l = snprintf(buf, PAGE_SIZE, "N/A\n");
                return l;
        }

        rlevel = hdev->raid_level;
        spin_unlock_irqrestore(&h->lock, flags);
        if (rlevel > RAID_UNKNOWN)
                rlevel = RAID_UNKNOWN;
        l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
        return l;
}

static ssize_t lunid_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char lunid[8];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                lunid[0], lunid[1], lunid[2], lunid[3],
                lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char sn[16];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(sn, hdev->device_id, sizeof(sn));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 16 * 2 + 2,
                        "%02X%02X%02X%02X%02X%02X%02X%02X"
                        "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                        sn[0], sn[1], sn[2], sn[3],
                        sn[4], sn[5], sn[6], sn[7],
                        sn[8], sn[9], sn[10], sn[11],
                        sn[12], sn[13], sn[14], sn[15]);
}

static int hpsa_find_target_lun(struct ctlr_info *h,
        unsigned char scsi3addr[], int bus, int *target, int *lun)
{
        /* finds an unused bus, target, lun for a new physical device
         * assumes h->devlock is held
         */
        int i, found = 0;
        DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);

        bitmap_zero(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);

        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
                        set_bit(h->dev[i]->target, lun_taken);
        }

        for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
                if (!test_bit(i, lun_taken)) {
                        /* *bus = 1; */
                        *target = i;
                        *lun = 0;
                        found = 1;
                        break;
                }
        }
        return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
                struct hpsa_scsi_dev_t *device,
                struct hpsa_scsi_dev_t *added[], int *nadded)
{
        /* assumes h->devlock is held */
        int n = h->ndevices;
        int i;
        unsigned char addr1[8], addr2[8];
        struct hpsa_scsi_dev_t *sd;

        if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
                dev_err(&h->pdev->dev, "too many devices, some will be "
                        "inaccessible.\n");
                return -1;
        }

        /* physical devices do not have lun or target assigned until now. */
        if (device->lun != -1)
                /* Logical device, lun is already assigned. */
                goto lun_assigned;

        /* If this device is a non-zero lun of a multi-lun device,
         * byte 4 of the 8-byte LUN addr will contain the logical
         * unit number, zero otherwise.
         */
        if (device->scsi3addr[4] == 0) {
                /* This is not a non-zero lun of a multi-lun device */
                if (hpsa_find_target_lun(h, device->scsi3addr,
                        device->bus, &device->target, &device->lun) != 0)
                        return -1;
                goto lun_assigned;
        }

        /* This is a non-zero lun of a multi-lun device.
         * Search through our list and find the device which
         * has the same 8 byte LUN address, excepting byte 4.
         * Assign the same bus and target for this new LUN.
         * Use the logical unit number from the firmware.
         */
        memcpy(addr1, device->scsi3addr, 8);
        addr1[4] = 0;
        for (i = 0; i < n; i++) {
                sd = h->dev[i];
                memcpy(addr2, sd->scsi3addr, 8);
                addr2[4] = 0;
                /* differ only in byte 4? */
                if (memcmp(addr1, addr2, 8) == 0) {
                        device->bus = sd->bus;
                        device->target = sd->target;
                        device->lun = device->scsi3addr[4];
                        break;
                }
        }
        if (device->lun == -1) {
                dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
                        " suspect firmware bug or unsupported hardware "
                        "configuration.\n");
                return -1;
        }

lun_assigned:

        h->dev[n] = device;
        h->ndevices++;
        added[*nadded] = device;
        (*nadded)++;

        /* initially, (before registering with scsi layer) we don't
         * know our hostno and we don't want to print anything first
         * time anyway (the scsi layer's inquiries will show that info)
         */
        /* if (hostno != -1) */
                dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
                        scsi_device_type(device->devtype), hostno,
                        device->bus, device->target, device->lun);
        return 0;
}

/* Replace an entry in the h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
        int entry, struct hpsa_scsi_dev_t *new_entry,
        struct hpsa_scsi_dev_t *added[], int *nadded,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;
        h->dev[entry] = new_entry;
        added[*nadded] = new_entry;
        (*nadded)++;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
                scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
                new_entry->target, new_entry->lun);
}

/* Remove an entry from the h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        int i;
        struct hpsa_scsi_dev_t *sd;

        BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);

        sd = h->dev[entry];
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        for (i = entry; i < h->ndevices-1; i++)
                h->dev[i] = h->dev[i+1];
        h->ndevices--;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
                scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
                sd->lun);
}

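/* Compare two 8-byte SCSI3 addresses for equality, byte by byte. */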
#define SCSI3ADDR_EQ(a, b) ( \
        (a)[7] == (b)[7] && \
        (a)[6] == (b)[6] && \
        (a)[5] == (b)[5] && \
        (a)[4] == (b)[4] && \
        (a)[3] == (b)[3] && \
        (a)[2] == (b)[2] && \
        (a)[1] == (b)[1] && \
        (a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *added)
{
        /* called when scsi_add_device fails in order to re-adjust
         * h->dev[] to match the mid layer's view.
         */
        unsigned long flags;
        int i, j;

        spin_lock_irqsave(&h->lock, flags);
        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i] == added) {
                        for (j = i; j < h->ndevices-1; j++)
                                h->dev[j] = h->dev[j+1];
                        h->ndevices--;
                        break;
                }
        }
        spin_unlock_irqrestore(&h->lock, flags);
        kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* we compare everything except lun and target as these
         * are not yet assigned.  Compare parts likely
         * to differ first
         */
        if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
                sizeof(dev1->scsi3addr)) != 0)
                return 0;
        if (memcmp(dev1->device_id, dev2->device_id,
                sizeof(dev1->device_id)) != 0)
                return 0;
        if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
                return 0;
        if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
                return 0;
        if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
                return 0;
        if (dev1->devtype != dev2->devtype)
                return 0;
        if (dev1->raid_level != dev2->raid_level)
                return 0;
        if (dev1->bus != dev2->bus)
                return 0;
        return 1;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
        struct hpsa_scsi_dev_t *haystack[], int haystack_size,
        int *index)
{
        int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
        for (i = 0; i < haystack_size; i++) {
                if (haystack[i] == NULL) /* previously removed. */
                        continue;
                if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
                        *index = i;
                        if (device_is_the_same(needle, haystack[i]))
                                return DEVICE_SAME;
                        else
                                return DEVICE_CHANGED;
                }
        }
        *index = -1;
        return DEVICE_NOT_FOUND;
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
        struct hpsa_scsi_dev_t *sd[], int nsds)
{
        /* sd contains scsi3 addresses and devtypes, and inquiry
         * data.  This function takes what's in sd to be the current
         * reality and updates h->dev[] to reflect that reality.
         */
        int i, entry, device_change, changes = 0;
        struct hpsa_scsi_dev_t *csd;
        unsigned long flags;
        struct hpsa_scsi_dev_t **added, **removed;
        int nadded, nremoved;
        struct Scsi_Host *sh = NULL;

        added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
                GFP_KERNEL);
        removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
                GFP_KERNEL);

        if (!added || !removed) {
                dev_warn(&h->pdev->dev, "out of memory in "
                        "adjust_hpsa_scsi_table\n");
                goto free_and_out;
        }

        spin_lock_irqsave(&h->devlock, flags);

        /* find any devices in h->dev[] that are not in
         * sd[] and remove them from h->dev[], and for any
         * devices which have changed, remove the old device
         * info and add the new device info.
         */
        i = 0;
        nremoved = 0;
        nadded = 0;
        while (i < h->ndevices) {
                csd = h->dev[i];
                device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
                if (device_change == DEVICE_NOT_FOUND) {
                        changes++;
                        hpsa_scsi_remove_entry(h, hostno, i,
                                removed, &nremoved);
                        continue; /* remove ^^^, hence i not incremented */
                } else if (device_change == DEVICE_CHANGED) {
                        changes++;
                        hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
                                added, &nadded, removed, &nremoved);
                        /* Set it to NULL to prevent it from being freed
                         * at the bottom of hpsa_update_scsi_devices()
                         */
                        sd[entry] = NULL;
                }
                i++;
        }

        /* Now, make sure every device listed in sd[] is also
         * listed in h->dev[], adding them if they aren't found
         */

        for (i = 0; i < nsds; i++) {
                if (!sd[i]) /* if already added above. */
                        continue;
                device_change = hpsa_scsi_find_entry(sd[i], h->dev,
                                        h->ndevices, &entry);
                if (device_change == DEVICE_NOT_FOUND) {
                        changes++;
                        if (hpsa_scsi_add_entry(h, hostno, sd[i],
                                added, &nadded) != 0)
                                break;
                        sd[i] = NULL; /* prevent from being freed later. */
                } else if (device_change == DEVICE_CHANGED) {
                        /* should never happen... */
                        changes++;
                        dev_warn(&h->pdev->dev,
                                "device unexpectedly changed.\n");
                        /* but if it does happen, we just ignore that device */
                }
        }
        spin_unlock_irqrestore(&h->devlock, flags);

        /* Don't notify the scsi mid layer of any changes the first time
         * through (or if there are no changes); scsi_scan_host will do
         * it later the first time through.
         */
        if (hostno == -1 || !changes)
                goto free_and_out;

        sh = h->scsi_host;
        /* Notify scsi mid layer of any removed devices */
        for (i = 0; i < nremoved; i++) {
                struct scsi_device *sdev =
                        scsi_device_lookup(sh, removed[i]->bus,
                                removed[i]->target, removed[i]->lun);
                if (sdev != NULL) {
                        scsi_remove_device(sdev);
                        scsi_device_put(sdev);
                } else {
                        /* We don't expect to get here.
                         * future cmds to this device will get selection
                         * timeout as if the device was gone.
                         */
                        dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
                                "for removal.", hostno, removed[i]->bus,
                                removed[i]->target, removed[i]->lun);
                }
                kfree(removed[i]);
                removed[i] = NULL;
        }

        /* Notify scsi mid layer of any added devices */
        for (i = 0; i < nadded; i++) {
                if (scsi_add_device(sh, added[i]->bus,
                        added[i]->target, added[i]->lun) == 0)
                        continue;
                dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
                        "device not added.\n", hostno, added[i]->bus,
                        added[i]->target, added[i]->lun);
                /* now we have to remove it from h->dev,
                 * since it didn't get added to scsi mid layer
                 */
                fixup_botched_add(h, added[i]);
        }

free_and_out:
        kfree(added);
        kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
        int bus, int target, int lun)
{
        int i;
        struct hpsa_scsi_dev_t *sd;

        for (i = 0; i < h->ndevices; i++) {
                sd = h->dev[i];
                if (sd->bus == bus && sd->target == target && sd->lun == lun)
                        return sd;
        }
        return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
        struct hpsa_scsi_dev_t *sd;
        unsigned long flags;
        struct ctlr_info *h;

        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->devlock, flags);
        sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
                sdev_id(sdev), sdev->lun);
        if (sd != NULL)
                sdev->hostdata = sd;
        spin_unlock_irqrestore(&h->devlock, flags);
        return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
        /* nothing to do. */
}

static void hpsa_scsi_setup(struct ctlr_info *h)
{
        h->ndevices = 0;
        h->scsi_host = NULL;
        spin_lock_init(&h->devlock);
}

static void complete_scsi_command(struct CommandList *cp,
        int timeout, u32 tag)
{
        struct scsi_cmnd *cmd;
        struct ctlr_info *h;
        struct ErrorInfo *ei;

        unsigned char sense_key;
        unsigned char asc;      /* additional sense code */
        unsigned char ascq;     /* additional sense code qualifier */

        ei = cp->err_info;
        cmd = (struct scsi_cmnd *) cp->scsi_cmd;
        h = cp->h;

        scsi_dma_unmap(cmd); /* undo the DMA mappings */

        cmd->result = (DID_OK << 16);           /* host byte */
        cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
        cmd->result |= ei->ScsiStatus;

        /* copy the sense data whether we need to or not. */
        memcpy(cmd->sense_buffer, ei->SenseInfo,
                ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
                        SCSI_SENSE_BUFFERSIZE :
                        ei->SenseLen);
        scsi_set_resid(cmd, ei->ResidualCnt);

        if (ei->CommandStatus == 0) {
                cmd->scsi_done(cmd);
                cmd_free(h, cp);
                return;
        }

        /* an error has occurred */
        switch (ei->CommandStatus) {

        case CMD_TARGET_STATUS:
                if (ei->ScsiStatus) {
                        /* Get sense key */
                        sense_key = 0xf & ei->SenseInfo[2];
                        /* Get additional sense code */
                        asc = ei->SenseInfo[12];
                        /* Get additional sense code qualifier */
                        ascq = ei->SenseInfo[13];
                }

                if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
                        if (check_for_unit_attention(h, cp)) {
                                cmd->result = DID_SOFT_ERROR << 16;
                                break;
                        }
                        if (sense_key == ILLEGAL_REQUEST) {
                                /*
                                 * SCSI REPORT_LUNS is commonly unsupported on
                                 * Smart Array.  Suppress noisy complaint.
                                 */
                                if (cp->Request.CDB[0] == REPORT_LUNS)
                                        break;

                                /* If ASC/ASCQ indicate Logical Unit
                                 * Not Supported condition,
                                 */
                                if ((asc == 0x25) && (ascq == 0x0)) {
                                        dev_warn(&h->pdev->dev, "cp %p "
                                                "has check condition\n", cp);
                                        break;
                                }
                        }

                        if (sense_key == NOT_READY) {
                                /* If Sense is Not Ready, Logical Unit
                                 * Not ready, Manual Intervention
                                 * required
                                 */
                                if ((asc == 0x04) && (ascq == 0x03)) {
                                        dev_warn(&h->pdev->dev, "cp %p "
                                                "has check condition: unit "
                                                "not ready, manual "
                                                "intervention required\n", cp);
                                        break;
                                }
                        }
                        if (sense_key == ABORTED_COMMAND) {
                                /* Aborted command is retryable */
                                dev_warn(&h->pdev->dev, "cp %p "
                                        "has check condition: aborted command: "
                                        "ASC: 0x%x, ASCQ: 0x%x\n",
                                        cp, asc, ascq);
                                cmd->result = DID_SOFT_ERROR << 16;
                                break;
                        }
                        /* Must be some other type of check condition */
                        dev_warn(&h->pdev->dev, "cp %p has check condition: "
                                        "unknown type: "
                                        "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
                                        "Returning result: 0x%x, "
                                        "cmd=[%02x %02x %02x %02x %02x "
                                        "%02x %02x %02x %02x %02x %02x "
                                        "%02x %02x %02x %02x %02x]\n",
                                        cp, sense_key, asc, ascq,
                                        cmd->result,
                                        cmd->cmnd[0], cmd->cmnd[1],
                                        cmd->cmnd[2], cmd->cmnd[3],
                                        cmd->cmnd[4], cmd->cmnd[5],
                                        cmd->cmnd[6], cmd->cmnd[7],
                                        cmd->cmnd[8], cmd->cmnd[9],
                                        cmd->cmnd[10], cmd->cmnd[11],
                                        cmd->cmnd[12], cmd->cmnd[13],
                                        cmd->cmnd[14], cmd->cmnd[15]);
                        break;
                }


                /* Problem was not a check condition
                 * Pass it up to the upper layers...
                 */
                if (ei->ScsiStatus) {
                        dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
                                "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
                                "Returning result: 0x%x\n",
                                cp, ei->ScsiStatus,
                                sense_key, asc, ascq,
                                cmd->result);
                } else {  /* scsi status is zero??? How??? */
                        dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
                                "Returning no connection.\n", cp);

                        /* Ordinarily, this case should never happen,
                         * but there is a bug in some released firmware
                         * revisions that allows it to happen if, for
                         * example, a 4100 backplane loses power and
                         * the tape drive is in it.  We assume that
                         * it's a fatal error of some kind because we
                         * can't show that it wasn't. We will make it
                         * look like selection timeout since that is
                         * the most common reason for this to occur,
                         * and it's severe enough.
                         */

                        cmd->result = DID_NO_CONNECT << 16;
                }
                break;

        case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
                break;
        case CMD_DATA_OVERRUN:
                dev_warn(&h->pdev->dev, "cp %p has"
                        " completed with data overrun "
                        "reported\n", cp);
                break;
        case CMD_INVALID: {
                /* print_bytes(cp, sizeof(*cp), 1, 0);
                print_cmd(cp); */
                /* We get CMD_INVALID if you address a non-existent device
                 * instead of a selection timeout (no response).  You will
                 * see this if you yank out a drive, then try to access it.
                 * This is kind of a shame because it means that any other
                 * CMD_INVALID (e.g. driver bug) will get interpreted as a
                 * missing target. */
                cmd->result = DID_NO_CONNECT << 16;
        }
                break;
        case CMD_PROTOCOL_ERR:
                dev_warn(&h->pdev->dev, "cp %p has "
                        "protocol error\n", cp);
                break;
        case CMD_HARDWARE_ERR:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
                break;
        case CMD_CONNECTION_LOST:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
                break;
        case CMD_ABORTED:
                cmd->result = DID_ABORT << 16;
                dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
                                cp, ei->ScsiStatus);
                break;
        case CMD_ABORT_FAILED:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
                break;
        case CMD_UNSOLICITED_ABORT:
                cmd->result = DID_RESET << 16;
                dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
                        "abort\n", cp);
                break;
        case CMD_TIMEOUT:
                cmd->result = DID_TIME_OUT << 16;
                dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
                break;
        default:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
                                cp, ei->CommandStatus);
        }
        cmd->scsi_done(cmd);
        cmd_free(h, cp);
}

static int hpsa_scsi_detect(struct ctlr_info *h)
{
        struct Scsi_Host *sh;
        int error;

        sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
        if (sh == NULL)
                goto fail;

        sh->io_port = 0;
        sh->n_io_port = 0;
        sh->this_id = -1;
        sh->max_channel = 3;
        sh->max_cmd_len = MAX_COMMAND_SIZE;
        sh->max_lun = HPSA_MAX_LUN;
        sh->max_id = HPSA_MAX_LUN;
        sh->can_queue = h->nr_cmds;
        sh->cmd_per_lun = h->nr_cmds;
        h->scsi_host = sh;
        sh->hostdata[0] = (unsigned long) h;
        sh->irq = h->intr[PERF_MODE_INT];
        sh->unique_id = sh->irq;
        error = scsi_add_host(sh, &h->pdev->dev);
        if (error)
                goto fail_host_put;
        scsi_scan_host(sh);
        return 0;

 fail_host_put:
        dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
                " failed for controller %d\n", h->ctlr);
        scsi_host_put(sh);
        return error;
 fail:
        dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
                " failed for controller %d\n", h->ctlr);
        return -ENOMEM;
}

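/* Undo the DMA mappings set up for a command's scatter-gather list. */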
static void hpsa_pci_unmap(struct pci_dev *pdev,
        struct CommandList *c, int sg_used, int data_direction)
{
        int i;
        union u64bit addr64;

        for (i = 0; i < sg_used; i++) {
                addr64.val32.lower = c->SG[i].Addr.lower;
                addr64.val32.upper = c->SG[i].Addr.upper;
                pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
                        data_direction);
        }
}

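/* Map a single contiguous buffer for DMA and describe it with the
 * command's one and only scatter-gather entry; a zero-length buffer
 * (or PCI_DMA_NONE) leaves the SG list empty.
 */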
static void hpsa_map_one(struct pci_dev *pdev,
                struct CommandList *cp,
                unsigned char *buf,
                size_t buflen,
                int data_direction)
{
        u64 addr64;

        if (buflen == 0 || data_direction == PCI_DMA_NONE) {
                cp->Header.SGList = 0;
                cp->Header.SGTotal = 0;
                return;
        }

        addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
        cp->SG[0].Addr.lower =
          (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
        cp->SG[0].Addr.upper =
          (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
        cp->SG[0].Len = buflen;
        cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
        cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}

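/* Send a command to the controller and sleep until its completion
 * handler signals c->waiting from the interrupt path.
 */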
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
        struct CommandList *c)
{
        DECLARE_COMPLETION_ONSTACK(wait);

        c->waiting = &wait;
        enqueue_cmd_and_start_io(h, c);
        wait_for_completion(&wait);
}

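/* As above, but resend the command (up to three retries) if it comes
 * back with a unit attention, then unmap its single data buffer.
 */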
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
        struct CommandList *c, int data_direction)
{
        int retry_count = 0;

        do {
                memset(c->err_info, 0, sizeof(*c->err_info));
                hpsa_scsi_do_simple_cmd_core(h, c);
                retry_count++;
        } while (check_for_unit_attention(h, c) && retry_count <= 3);
        hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
        struct ErrorInfo *ei;
        struct device *d = &cp->h->pdev->dev;

        ei = cp->err_info;
        switch (ei->CommandStatus) {
        case CMD_TARGET_STATUS:
                dev_warn(d, "cmd %p has completed with errors\n", cp);
                dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
                                ei->ScsiStatus);
                if (ei->ScsiStatus == 0)
                        dev_warn(d, "SCSI status is abnormally zero.  "
                        "(probably indicates selection timeout "
                        "reported incorrectly due to a known "
                        "firmware bug, circa July, 2001.)\n");
                break;
        case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
                dev_info(d, "UNDERRUN\n");
                break;
        case CMD_DATA_OVERRUN:
                dev_warn(d, "cp %p has completed with data overrun\n", cp);
                break;
        case CMD_INVALID: {
                /* controller unfortunately reports SCSI passthru's
                 * to non-existent targets as invalid commands.
                 */
                dev_warn(d, "cp %p is reported invalid (probably means "
                        "target device no longer present)\n", cp);
                /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
                print_cmd(cp);  */
                }
                break;
        case CMD_PROTOCOL_ERR:
                dev_warn(d, "cp %p has protocol error\n", cp);
                break;
        case CMD_HARDWARE_ERR:
                /* cmd->result = DID_ERROR << 16; */
                dev_warn(d, "cp %p had hardware error\n", cp);
                break;
        case CMD_CONNECTION_LOST:
                dev_warn(d, "cp %p had connection lost\n", cp);
                break;
        case CMD_ABORTED:
                dev_warn(d, "cp %p was aborted\n", cp);
                break;
        case CMD_ABORT_FAILED:
                dev_warn(d, "cp %p reports abort failed\n", cp);
                break;
        case CMD_UNSOLICITED_ABORT:
                dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
                break;
        case CMD_TIMEOUT:
                dev_warn(d, "cp %p timed out\n", cp);
                break;
        default:
                dev_warn(d, "cp %p returned unknown status %x\n", cp,
                                ei->CommandStatus);
        }
}
1352
1353 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1354                         unsigned char page, unsigned char *buf,
1355                         unsigned char bufsize)
1356 {
1357         int rc = IO_OK;
1358         struct CommandList *c;
1359         struct ErrorInfo *ei;
1360
1361         c = cmd_special_alloc(h);
1362
1363         if (c == NULL) {                        /* trouble... */
1364                 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1365                 return -ENOMEM;
1366         }
1367
1368         fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1369         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1370         ei = c->err_info;
1371         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1372                 hpsa_scsi_interpret_error(c);
1373                 rc = -1;
1374         }
1375         cmd_special_free(h, c);
1376         return rc;
1377 }
1378
1379 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1380 {
1381         int rc = IO_OK;
1382         struct CommandList *c;
1383         struct ErrorInfo *ei;
1384
1385         c = cmd_special_alloc(h);
1386
1387         if (c == NULL) {                        /* trouble... */
1388                 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1389                 return -ENOMEM;
1390         }
1391
1392         fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1393         hpsa_scsi_do_simple_cmd_core(h, c);
1394         /* no unmap needed here because no data xfer. */
1395
1396         ei = c->err_info;
1397         if (ei->CommandStatus != 0) {
1398                 hpsa_scsi_interpret_error(c);
1399                 rc = -1;
1400         }
1401         cmd_special_free(h, c);
1402         return rc;
1403 }
1404
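/* Fetch the RAID level via vendor-specific inquiry page 0xC1; byte 8 of
 * the response holds the level, and out-of-range values are reported as
 * RAID_UNKNOWN.
 */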
1405 static void hpsa_get_raid_level(struct ctlr_info *h,
1406         unsigned char *scsi3addr, unsigned char *raid_level)
1407 {
1408         int rc;
1409         unsigned char *buf;
1410
1411         *raid_level = RAID_UNKNOWN;
1412         buf = kzalloc(64, GFP_KERNEL);
1413         if (!buf)
1414                 return;
1415         rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1416         if (rc == 0)
1417                 *raid_level = buf[8];
1418         if (*raid_level > RAID_UNKNOWN)
1419                 *raid_level = RAID_UNKNOWN;
1420         kfree(buf);
1422 }
1423
1424 /* Get the device id from inquiry page 0x83 */
1425 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1426         unsigned char *device_id, int buflen)
1427 {
1428         int rc;
1429         unsigned char *buf;
1430
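        /* VPD page 0x83: the identifier is assumed to start at byte 8 of
         * the response (4-byte page header plus 4-byte designator header),
         * and is truncated to at most 16 bytes here.
         */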
1431         if (buflen > 16)
1432                 buflen = 16;
1433         buf = kzalloc(64, GFP_KERNEL);
1434         if (!buf)
1435                 return -1;
1436         rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1437         if (rc == 0)
1438                 memcpy(device_id, &buf[8], buflen);
1439         kfree(buf);
1440         return rc != 0;
1441 }
1442
1443 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1444                 struct ReportLUNdata *buf, int bufsize,
1445                 int extended_response)
1446 {
1447         int rc = IO_OK;
1448         struct CommandList *c;
1449         unsigned char scsi3addr[8];
1450         struct ErrorInfo *ei;
1451
1452         c = cmd_special_alloc(h);
1453         if (c == NULL) {                        /* trouble... */
1454                 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1455                 return -1;
1456         }
1457         /* address the controller */
1458         memset(scsi3addr, 0, sizeof(scsi3addr));
1459         fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1460                 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1461         if (extended_response)
1462                 c->Request.CDB[1] = extended_response;
1463         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1464         ei = c->err_info;
1465         if (ei->CommandStatus != 0 &&
1466             ei->CommandStatus != CMD_DATA_UNDERRUN) {
1467                 hpsa_scsi_interpret_error(c);
1468                 rc = -1;
1469         }
1470         cmd_special_free(h, c);
1471         return rc;
1472 }
1473
1474 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1475                 struct ReportLUNdata *buf,
1476                 int bufsize, int extended_response)
1477 {
1478         return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1479 }
1480
1481 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1482                 struct ReportLUNdata *buf, int bufsize)
1483 {
1484         return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1485 }
1486
1487 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1488         int bus, int target, int lun)
1489 {
1490         device->bus = bus;
1491         device->target = target;
1492         device->lun = lun;
1493 }
1494
1495 static int hpsa_update_device_info(struct ctlr_info *h,
1496         unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1497 {
1498 #define OBDR_TAPE_INQ_SIZE 49
1499         unsigned char *inq_buff;
1500
1501         inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1502         if (!inq_buff)
1503                 goto bail_out;
1504
1505         /* Do an inquiry to the device to see what it is. */
1506         if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1507                 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1508                 /* Inquiry failed (msg printed already) */
1509                 dev_err(&h->pdev->dev,
1510                         "hpsa_update_device_info: inquiry failed\n");
1511                 goto bail_out;
1512         }
1513
1514         /* As a side effect, record the firmware version number
1515          * if we happen to be talking to the RAID controller.
1516          */
1517         if (is_hba_lunid(scsi3addr))
1518                 memcpy(h->firm_ver, &inq_buff[32], 4);
1519
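        /* Standard INQUIRY layout: low 5 bits of byte 0 hold the peripheral
         * device type, bytes 8-15 the vendor id, bytes 16-31 the product id,
         * and bytes 32-35 the product revision level.
         */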
1520         this_device->devtype = (inq_buff[0] & 0x1f);
1521         memcpy(this_device->scsi3addr, scsi3addr, 8);
1522         memcpy(this_device->vendor, &inq_buff[8],
1523                 sizeof(this_device->vendor));
1524         memcpy(this_device->model, &inq_buff[16],
1525                 sizeof(this_device->model));
1526         memcpy(this_device->revision, &inq_buff[32],
1527                 sizeof(this_device->revision));
1528         memset(this_device->device_id, 0,
1529                 sizeof(this_device->device_id));
1530         hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1531                 sizeof(this_device->device_id));
1532
1533         if (this_device->devtype == TYPE_DISK &&
1534                 is_logical_dev_addr_mode(scsi3addr))
1535                 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1536         else
1537                 this_device->raid_level = RAID_UNKNOWN;
1538
1539         kfree(inq_buff);
1540         return 0;
1541
1542 bail_out:
1543         kfree(inq_buff);
1544         return 1;
1545 }
1546
1547 static unsigned char *msa2xxx_model[] = {
1548         "MSA2012",
1549         "MSA2024",
1550         "MSA2312",
1551         "MSA2324",
1552         NULL,
1553 };
1554
1555 static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1556 {
1557         int i;
1558
1559         for (i = 0; msa2xxx_model[i]; i++)
1560                 if (strncmp(device->model, msa2xxx_model[i],
1561                         strlen(msa2xxx_model[i])) == 0)
1562                         return 1;
1563         return 0;
1564 }
1565
1566 /* Helper function to assign bus, target, lun mapping of devices.
1567  * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
1568  * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
1569  * Logical drive target and lun are assigned at this time, but
1570  * physical device lun and target assignment are deferred (assigned
1571  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1572  */
1573 static void figure_bus_target_lun(struct ctlr_info *h,
1574         u8 *lunaddrbytes, int *bus, int *target, int *lun,
1575         struct hpsa_scsi_dev_t *device)
1576 {
1577         u32 lunid;
1578
1579         if (is_logical_dev_addr_mode(lunaddrbytes)) {
1580                 /* logical device */
1581                 if (unlikely(is_scsi_rev_5(h))) {
1582                         /* p1210m, logical drives lun assignments
1583                          * match SCSI REPORT LUNS data.
1584                          */
1585                         lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1586                         *bus = 0;
1587                         *target = 0;
1588                         *lun = (lunid & 0x3fff) + 1;
1589                 } else {
1590                         /* not p1210m... */
1591                         lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1592                         if (is_msa2xxx(h, device)) {
1593                                 /* msa2xxx way, put logicals on bus 1
1594                                  * and match target/lun numbers box
1595                                  * reports.
1596                                  */
1597                                 *bus = 1;
1598                                 *target = (lunid >> 16) & 0x3fff;
1599                                 *lun = lunid & 0x00ff;
1600                         } else {
1601                                 /* Traditional smart array way. */
1602                                 *bus = 0;
1603                                 *lun = 0;
1604                                 *target = lunid & 0x3fff;
1605                         }
1606                 }
1607         } else {
1608                 /* physical device */
1609                 if (is_hba_lunid(lunaddrbytes)) {
1610                         if (unlikely(is_scsi_rev_5(h))) {
1611                                 *bus = 0; /* put p1210m ctlr at 0,0,0 */
1612                                 *target = 0;
1613                                 *lun = 0;
1614                                 return;
1615                         } else
1616                                 *bus = 3; /* traditional smartarray */
1617                 } else
1618                         *bus = 2; /* physical disk */
1619                 *target = -1;
1620                 *lun = -1; /* we will fill these in later. */
1621         }
1622 }
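
/* A minimal illustrative sketch, never compiled in: the MSA2xxx decode
 * rule applied above.  example_msa2xxx_decode() is not part of the driver.
 */
#if 0
static void example_msa2xxx_decode(u32 lunid, int *target, int *lun)
{
	*target = (lunid >> 16) & 0x3fff;	/* e.g. 0x00050002 -> 5 */
	*lun = lunid & 0x00ff;			/* e.g. 0x00050002 -> 2 */
}
#endif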
1623
1624 /*
1625  * If there is no lun 0 on a target, linux won't find any devices.
1626  * For the MSA2xxx boxes, we have to manually detect the enclosure
1627  * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1628  * it for some reason.  *tmpdevice is the target we're adding,
1629  * this_device is a pointer into the current element of currentsd[]
1630  * that we're building up in update_scsi_devices(), below.
1631  * lunzerobits is a bitmap that tracks which targets already have a
1632  * lun 0 assigned.
1633  * Returns 1 if an enclosure was added, 0 if not.
1634  */
1635 static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1636         struct hpsa_scsi_dev_t *tmpdevice,
1637         struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1638         int bus, int target, int lun, unsigned long lunzerobits[],
1639         int *nmsa2xxx_enclosures)
1640 {
1641         unsigned char scsi3addr[8];
1642
1643         if (test_bit(target, lunzerobits))
1644                 return 0; /* There is already a lun 0 on this target. */
1645
1646         if (!is_logical_dev_addr_mode(lunaddrbytes))
1647                 return 0; /* It's the logical targets that may lack lun 0. */
1648
1649         if (!is_msa2xxx(h, tmpdevice))
1650                 return 0; /* It's only the MSA2xxx that have this problem. */
1651
1652         if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
1653                 return 0;
1654
1655         if (is_hba_lunid(lunaddrbytes))
1656                 return 0; /* Don't add the RAID controller here. */
1657
1658         if (is_scsi_rev_5(h))
1659                 return 0; /* p1210m doesn't need to do this. */
1660
1661 #define MAX_MSA2XXX_ENCLOSURES 32
1662         if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1663                 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
1664                         "enclosures exceeded.  Check your hardware "
1665                         "configuration.\n");
1666                 return 0;
1667         }
1668
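        /* Fabricate the enclosure's address: all-zero bytes with the target
         * number in byte 3, then probe it like any other device.
         */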
1669         memset(scsi3addr, 0, 8);
1670         scsi3addr[3] = target;
1671         if (hpsa_update_device_info(h, scsi3addr, this_device))
1672                 return 0;
1673         (*nmsa2xxx_enclosures)++;
1674         hpsa_set_bus_target_lun(this_device, bus, target, 0);
1675         set_bit(target, lunzerobits);
1676         return 1;
1677 }
1678
1679 /*
1680  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
1681  * logdev.  The number of luns in physdev and logdev are returned in
1682  * *nphysicals and *nlogicals, respectively.
1683  * Returns 0 on success, -1 otherwise.
1684  */
1685 static int hpsa_gather_lun_info(struct ctlr_info *h,
1686         int reportlunsize,
1687         struct ReportLUNdata *physdev, u32 *nphysicals,
1688         struct ReportLUNdata *logdev, u32 *nlogicals)
1689 {
1690         if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1691                 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1692                 return -1;
1693         }
1694         *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
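        /* e.g. a big-endian LUNListLength of 00 00 00 40 is 0x40 = 64 bytes
         * of 8-byte entries, i.e. 8 physical LUNs.
         */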
1695         if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1696                 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1697                         "  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1698                         *nphysicals - HPSA_MAX_PHYS_LUN);
1699                 *nphysicals = HPSA_MAX_PHYS_LUN;
1700         }
1701         if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1702                 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1703                 return -1;
1704         }
1705         *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1706         /* Reject Logicals in excess of our max capability. */
1707         if (*nlogicals > HPSA_MAX_LUN) {
1708                 dev_warn(&h->pdev->dev,
1709                         "maximum logical LUNs (%d) exceeded.  "
1710                         "%d LUNs ignored.\n", HPSA_MAX_LUN,
1711                         *nlogicals - HPSA_MAX_LUN);
1712                 *nlogicals = HPSA_MAX_LUN;
1713         }
1714         if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1715                 dev_warn(&h->pdev->dev,
1716                         "maximum logical + physical LUNs (%d) exceeded. "
1717                         "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1718                         *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1719                 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1720         }
1721         return 0;
1722 }
1723
1724 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1725         int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
1726         struct ReportLUNdata *logdev_list)
1727 {
1728         /* Helper function, figure out where the LUN ID info is coming from
1729          * given index i, lists of physical and logical devices, where in
1730          * the list the raid controller is supposed to appear (first or last)
1731          */
1732
1733         int logicals_start = nphysicals + (raid_ctlr_position == 0);
1734         int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1735
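        /* Example with 2 physicals and 3 logicals: on SCSI rev 5 boards
         * (raid_ctlr_position == 0) the controller is i = 0, physicals are
         * i = 1..2 and logicals i = 3..5; otherwise physicals are i = 0..1,
         * logicals i = 2..4, and the controller comes last at i = 5.
         */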
1736         if (i == raid_ctlr_position)
1737                 return RAID_CTLR_LUNID;
1738
1739         if (i < logicals_start)
1740                 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
1741
1742         if (i < last_device)
1743                 return &logdev_list->LUN[i - nphysicals -
1744                         (raid_ctlr_position == 0)][0];
1745         BUG();
1746         return NULL;
1747 }
1748
1749 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1750 {
1751         /* the idea here is we could get notified
1752          * that some devices have changed, so we do a report
1753          * physical luns and report logical luns cmd, and adjust
1754          * our list of devices accordingly.
1755          *
1756          * The scsi3addr's of devices won't change so long as the
1757          * adapter is not reset.  That means we can rescan and
1758          * tell which devices we already know about, vs. new
1759          * devices, vs.  disappearing devices.
1760          */
1761         struct ReportLUNdata *physdev_list = NULL;
1762         struct ReportLUNdata *logdev_list = NULL;
1763         unsigned char *inq_buff = NULL;
1764         u32 nphysicals = 0;
1765         u32 nlogicals = 0;
1766         u32 ndev_allocated = 0;
1767         struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1768         int ncurrent = 0;
1769         int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1770         int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1771         int bus, target, lun;
1772         int raid_ctlr_position;
1773         DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1774
1775         currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
1776                 GFP_KERNEL);
1777         physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1778         logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1779         inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1780         tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1781
1782         if (!currentsd || !physdev_list || !logdev_list ||
1783                 !inq_buff || !tmpdevice) {
1784                 dev_err(&h->pdev->dev, "out of memory\n");
1785                 goto out;
1786         }
1787         memset(lunzerobits, 0, sizeof(lunzerobits));
1788
1789         if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1790                         logdev_list, &nlogicals))
1791                 goto out;
1792
1793         /* We might see up to 32 MSA2xxx enclosures, actually 8 distinct
1794          * ones, but each seen 4 times through different paths.  The plus 1
1795          * is for the RAID controller.
1796          */
1797         ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1798
1799         /* Allocate the per device structures */
1800         for (i = 0; i < ndevs_to_allocate; i++) {
1801                 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1802                 if (!currentsd[i]) {
1803                         dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1804                                 __FILE__, __LINE__);
1805                         goto out;
1806                 }
1807                 ndev_allocated++;
1808         }
1809
1810         if (unlikely(is_scsi_rev_5(h)))
1811                 raid_ctlr_position = 0;
1812         else
1813                 raid_ctlr_position = nphysicals + nlogicals;
1814
1815         /* adjust our table of devices */
1816         nmsa2xxx_enclosures = 0;
1817         for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1818                 u8 *lunaddrbytes;
1819
1820                 /* Figure out where the LUN ID info is coming from */
1821                 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1822                         i, nphysicals, nlogicals, physdev_list, logdev_list);
1823                 /* skip masked physical devices. */
1824                 if ((lunaddrbytes[3] & 0xC0) &&
1825                         i < nphysicals + (raid_ctlr_position == 0))
1826                         continue;
1827
1828                 /* Get device type, vendor, model, device id */
1829                 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
1830                         continue; /* skip it if we can't talk to it. */
1831                 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1832                         tmpdevice);
1833                 this_device = currentsd[ncurrent];
1834
1835                 /*
1836                  * For the msa2xxx boxes, we have to insert a LUN 0 which
1837                  * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1838                  * is nonetheless an enclosure device there.  We have to
1839                  * present that otherwise linux won't find anything if
1840                  * there is no lun 0.
1841                  */
1842                 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1843                                 lunaddrbytes, bus, target, lun, lunzerobits,
1844                                 &nmsa2xxx_enclosures)) {
1845                         ncurrent++;
1846                         this_device = currentsd[ncurrent];
1847                 }
1848
1849                 *this_device = *tmpdevice;
1850                 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1851
1852                 switch (this_device->devtype) {
1853                 case TYPE_ROM: {
1854                         /* We don't *really* support actual CD-ROM devices,
1855                          * just "One Button Disaster Recovery" tape drive
1856                          * which temporarily pretends to be a CD-ROM drive.
1857                          * So we check that the device is really an OBDR tape
1858                          * device by checking for "$DR-10" in bytes 43-48 of
1859                          * the inquiry data.
1860                          */
1861                         char obdr_sig[7];
1862 #define OBDR_TAPE_SIG "$DR-10"
1863                         strncpy(obdr_sig, &inq_buff[43], 6);
1864                         obdr_sig[6] = '\0';
1865                         if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1866                                 /* Not OBDR device, ignore it. */
1867                                 break;
1868                         }
1869                         ncurrent++;
1870                         break;
1871                 case TYPE_DISK:
1872                         if (i < nphysicals)
1873                                 break;
1874                         ncurrent++;
1875                         break;
1876                 case TYPE_TAPE:
1877                 case TYPE_MEDIUM_CHANGER:
1878                         ncurrent++;
1879                         break;
1880                 case TYPE_RAID:
1881                         /* Only present the Smartarray HBA as a RAID controller.
1882                          * If it's a RAID controller other than the HBA itself
1883                          * (an external RAID controller, MSA500 or similar)
1884                          * don't present it.
1885                          */
1886                         if (!is_hba_lunid(lunaddrbytes))
1887                                 break;
1888                         ncurrent++;
1889                         break;
1890                 default:
1891                         break;
1892                 }
1893                 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
1894                         break;
1895         }
1896         adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1897 out:
1898         kfree(tmpdevice);
1899         for (i = 0; i < ndev_allocated; i++)
1900                 kfree(currentsd[i]);
1901         kfree(currentsd);
1902         kfree(inq_buff);
1903         kfree(physdev_list);
1904         kfree(logdev_list);
1905 }
1906
1907 /* hpsa_scatter_gather takes a struct scsi_cmnd (cmd) and does the pci
1908  * dma mapping and fills in the scatter gather entries of the
1909  * hpsa command, cp.
1910  */
1911 static int hpsa_scatter_gather(struct pci_dev *pdev,
1912                 struct CommandList *cp,
1913                 struct scsi_cmnd *cmd)
1914 {
1915         unsigned int len;
1916         struct scatterlist *sg;
1917         u64 addr64;
1918         int use_sg, i;
1919
1920         BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
1921
1922         use_sg = scsi_dma_map(cmd);
1923         if (use_sg < 0)
1924                 return use_sg;
1925
1926         if (!use_sg)
1927                 goto sglist_finished;
1928
1929         scsi_for_each_sg(cmd, sg, use_sg, i) {
1930                 addr64 = (u64) sg_dma_address(sg);
1931                 len  = sg_dma_len(sg);
1932                 cp->SG[i].Addr.lower =
1933                         (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1934                 cp->SG[i].Addr.upper =
1935                         (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1936                 cp->SG[i].Len = len;
1937                 cp->SG[i].Ext = 0;  /* we are not chaining */
1938         }
1939
1940 sglist_finished:
1941
1942         cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
1943         cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
1944         return 0;
1945 }
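
/* A minimal sketch, never compiled in, of the 64-bit address split used
 * above; it assumes the SG descriptor layout from hpsa_cmd.h.
 */
#if 0
static void example_fill_sg_addr(struct SGDescriptor *sgd, u64 addr64, u32 len)
{
	sgd->Addr.lower = (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	sgd->Addr.upper = (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	sgd->Len = len;
	sgd->Ext = 0;	/* not chaining */
}
#endif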
1946
1948 static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
1949         void (*done)(struct scsi_cmnd *))
1950 {
1951         struct ctlr_info *h;
1952         struct hpsa_scsi_dev_t *dev;
1953         unsigned char scsi3addr[8];
1954         struct CommandList *c;
1955         unsigned long flags;
1956
1957         /* Get the ptr to our adapter structure out of cmd->host. */
1958         h = sdev_to_hba(cmd->device);
1959         dev = cmd->device->hostdata;
1960         if (!dev) {
1961                 cmd->result = DID_NO_CONNECT << 16;
1962                 done(cmd);
1963                 return 0;
1964         }
1965         memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
1966
1967         /* Need a lock as this is being allocated from the pool */
1968         spin_lock_irqsave(&h->lock, flags);
1969         c = cmd_alloc(h);
1970         spin_unlock_irqrestore(&h->lock, flags);
1971         if (c == NULL) {                        /* trouble... */
1972                 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
1973                 return SCSI_MLQUEUE_HOST_BUSY;
1974         }
1975
1976         /* Fill in the command list header */
1977
1978         cmd->scsi_done = done;    /* save this for use by completion code */
1979
1980         /* save c in case we have to abort it  */
1981         cmd->host_scribble = (unsigned char *) c;
1982
1983         c->cmd_type = CMD_SCSI;
1984         c->scsi_cmd = cmd;
1985         c->Header.ReplyQueue = 0;  /* unused in simple mode */
1986         memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
1987         c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
1988         c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
1989
1990         /* Fill in the request block... */
1991
1992         c->Request.Timeout = 0;
1993         memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
1994         BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
1995         c->Request.CDBLen = cmd->cmd_len;
1996         memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
1997         c->Request.Type.Type = TYPE_CMD;
1998         c->Request.Type.Attribute = ATTR_SIMPLE;
1999         switch (cmd->sc_data_direction) {
2000         case DMA_TO_DEVICE:
2001                 c->Request.Type.Direction = XFER_WRITE;
2002                 break;
2003         case DMA_FROM_DEVICE:
2004                 c->Request.Type.Direction = XFER_READ;
2005                 break;
2006         case DMA_NONE:
2007                 c->Request.Type.Direction = XFER_NONE;
2008                 break;
2009         case DMA_BIDIRECTIONAL:
2010                 /* This can happen if a buggy application does a scsi passthru
2011                  * and sets both inlen and outlen to non-zero. ( see
2012                  * and sets both inlen and outlen to non-zero.  (See
2013                  * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().)
2014
2015                 c->Request.Type.Direction = XFER_RSVD;
2016                 /* This is technically wrong, and hpsa controllers should
2017                  * reject it with CMD_INVALID, which is the most correct
2018                  * response, but non-fibre backends appear to let it
2019                  * slide by, and give the same results as if this field
2020                  * were set correctly.  Either way is acceptable for
2021                  * our purposes here.
2022                  */
2023
2024                 break;
2025
2026         default:
2027                 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2028                         cmd->sc_data_direction);
2029                 BUG();
2030                 break;
2031         }
2032
2033         if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
2034                 cmd_free(h, c);
2035                 return SCSI_MLQUEUE_HOST_BUSY;
2036         }
2037         enqueue_cmd_and_start_io(h, c);
2038         /* the cmd will come back via the intr handler in complete_scsi_command() */
2039         return 0;
2040 }
2041
2042 static void hpsa_scan_start(struct Scsi_Host *sh)
2043 {
2044         struct ctlr_info *h = shost_to_hba(sh);
2045         unsigned long flags;
2046
2047         /* wait until any scan already in progress is finished. */
2048         while (1) {
2049                 spin_lock_irqsave(&h->scan_lock, flags);
2050                 if (h->scan_finished)
2051                         break;
2052                 spin_unlock_irqrestore(&h->scan_lock, flags);
2053                 wait_event(h->scan_wait_queue, h->scan_finished);
2054                 /* Note: We don't need to worry about a race between this
2055                  * thread and driver unload because the midlayer will
2056                  * have incremented the reference count, so unload won't
2057                  * happen if we're in here.
2058                  */
2059         }
2060         h->scan_finished = 0; /* mark scan as in progress */
2061         spin_unlock_irqrestore(&h->scan_lock, flags);
2062
2063         hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2064
2065         spin_lock_irqsave(&h->scan_lock, flags);
2066         h->scan_finished = 1; /* mark scan as finished. */
2067         wake_up_all(&h->scan_wait_queue);
2068         spin_unlock_irqrestore(&h->scan_lock, flags);
2069 }
2070
2071 static int hpsa_scan_finished(struct Scsi_Host *sh,
2072         unsigned long elapsed_time)
2073 {
2074         struct ctlr_info *h = shost_to_hba(sh);
2075         unsigned long flags;
2076         int finished;
2077
2078         spin_lock_irqsave(&h->scan_lock, flags);
2079         finished = h->scan_finished;
2080         spin_unlock_irqrestore(&h->scan_lock, flags);
2081         return finished;
2082 }
2083
2084 static int hpsa_change_queue_depth(struct scsi_device *sdev,
2085         int qdepth, int reason)
2086 {
2087         struct ctlr_info *h = sdev_to_hba(sdev);
2088
2089         if (reason != SCSI_QDEPTH_DEFAULT)
2090                 return -ENOTSUPP;
2091
2092         if (qdepth < 1)
2093                 qdepth = 1;
2094         else
2095                 if (qdepth > h->nr_cmds)
2096                         qdepth = h->nr_cmds;
2097         scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2098         return sdev->queue_depth;
2099 }
2100
2101 static void hpsa_unregister_scsi(struct ctlr_info *h)
2102 {
2103         /* we are being forcibly unloaded, and may not refuse. */
2104         scsi_remove_host(h->scsi_host);
2105         scsi_host_put(h->scsi_host);
2106         h->scsi_host = NULL;
2107 }
2108
2109 static int hpsa_register_scsi(struct ctlr_info *h)
2110 {
2111         int rc;
2112
2113         rc = hpsa_scsi_detect(h);
2114         if (rc != 0)
2115                 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
2116                         " hpsa_scsi_detect(), rc is %d\n", rc);
2117         return rc;
2118 }
2119
2120 static int wait_for_device_to_become_ready(struct ctlr_info *h,
2121         unsigned char lunaddr[])
2122 {
2123         int rc = 0;
2124         int count = 0;
2125         int waittime = 1; /* seconds */
2126         struct CommandList *c;
2127
2128         c = cmd_special_alloc(h);
2129         if (!c) {
2130                 dev_warn(&h->pdev->dev, "out of memory in "
2131                         "wait_for_device_to_become_ready.\n");
2132                 return IO_ERROR;
2133         }
2134
2135         /* Send test unit ready until device ready, or give up. */
2136         while (count < HPSA_TUR_RETRY_LIMIT) {
2137
2138                 /* Wait for a bit.  Do this first, because if we send
2139                  * the TUR right away, the reset will just abort it.
2140                  */
2141                 msleep(1000 * waittime);
2142                 count++;
2143
2144                 /* Increase wait time with each try, up to a point. */
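                /* (i.e. waits of 1, 2, 4, ... seconds, capped at
                 * HPSA_MAX_WAIT_INTERVAL_SECS) */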
2145                 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2146                         waittime = waittime * 2;
2147
2148                 /* Send the Test Unit Ready */
2149                 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2150                 hpsa_scsi_do_simple_cmd_core(h, c);
2151                 /* no unmap needed here because no data xfer. */
2152
2153                 if (c->err_info->CommandStatus == CMD_SUCCESS)
2154                         break;
2155
2156                 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2157                         c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2158                         (c->err_info->SenseInfo[2] == NO_SENSE ||
2159                         c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2160                         break;
2161
2162                 dev_warn(&h->pdev->dev, "waiting %d secs "
2163                         "for device to become ready.\n", waittime);
2164                 rc = 1; /* device not ready. */
2165         }
2166
2167         if (rc)
2168                 dev_warn(&h->pdev->dev, "giving up on device.\n");
2169         else
2170                 dev_warn(&h->pdev->dev, "device is ready.\n");
2171
2172         cmd_special_free(h, c);
2173         return rc;
2174 }
2175
2176 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
2177  * complaining.  Doing a host- or bus-reset can't do anything good here.
2178  */
2179 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2180 {
2181         int rc;
2182         struct ctlr_info *h;
2183         struct hpsa_scsi_dev_t *dev;
2184
2185         /* find the controller to which the command to be aborted was sent */
2186         h = sdev_to_hba(scsicmd->device);
2187         if (h == NULL) /* paranoia */
2188                 return FAILED;
2189         dev = scsicmd->device->hostdata;
2190         if (!dev) {
2191                 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2192                         "device lookup failed.\n");
2193                 return FAILED;
2194         }
2195         dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2196                 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2197         /* send a reset to the SCSI LUN which the command was sent to */
2198         rc = hpsa_send_reset(h, dev->scsi3addr);
2199         if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2200                 return SUCCESS;
2201
2202         dev_warn(&h->pdev->dev, "resetting device failed.\n");
2203         return FAILED;
2204 }
2205
2206 /*
2207  * For operations that cannot sleep, a command block is allocated at init,
2208  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2209  * which ones are free or in use.  Lock must be held when calling this.
2210  * cmd_free() is the complement.
2211  */
2212 static struct CommandList *cmd_alloc(struct ctlr_info *h)
2213 {
2214         struct CommandList *c;
2215         int i;
2216         union u64bit temp64;
2217         dma_addr_t cmd_dma_handle, err_dma_handle;
2218
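        /* Optimistic bitmap allocation: find_first_zero_bit() nominates a
         * free slot and test_and_set_bit() claims it; the loop retries if
         * the nominated bit was set in the meantime.
         */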
2219         do {
2220                 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2221                 if (i == h->nr_cmds)
2222                         return NULL;
2223         } while (test_and_set_bit
2224                  (i & (BITS_PER_LONG - 1),
2225                   h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2226         c = h->cmd_pool + i;
2227         memset(c, 0, sizeof(*c));
2228         cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
2229         c->err_info = h->errinfo_pool + i;
2230         memset(c->err_info, 0, sizeof(*c->err_info));
2231         err_dma_handle = h->errinfo_pool_dhandle + i * sizeof(*c->err_info);
2234         h->nr_allocs++;
2235
2236         c->cmdindex = i;
2237
2238         INIT_HLIST_NODE(&c->list);
2239         c->busaddr = (u32) cmd_dma_handle;
2240         temp64.val = (u64) err_dma_handle;
2241         c->ErrDesc.Addr.lower = temp64.val32.lower;
2242         c->ErrDesc.Addr.upper = temp64.val32.upper;
2243         c->ErrDesc.Len = sizeof(*c->err_info);
2244
2245         c->h = h;
2246         return c;
2247 }
2248
2249 /* For operations that can wait for kmalloc to possibly sleep,
2250  * this routine can be called. Lock need not be held to call
2251  * cmd_special_alloc. cmd_special_free() is the complement.
2252  */
2253 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2254 {
2255         struct CommandList *c;
2256         union u64bit temp64;
2257         dma_addr_t cmd_dma_handle, err_dma_handle;
2258
2259         c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2260         if (c == NULL)
2261                 return NULL;
2262         memset(c, 0, sizeof(*c));
2263
2264         c->cmdindex = -1;
2265
2266         c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2267                     &err_dma_handle);
2268
2269         if (c->err_info == NULL) {
2270                 pci_free_consistent(h->pdev,
2271                         sizeof(*c), c, cmd_dma_handle);
2272                 return NULL;
2273         }
2274         memset(c->err_info, 0, sizeof(*c->err_info));
2275
2276         INIT_HLIST_NODE(&c->list);
2277         c->busaddr = (u32) cmd_dma_handle;
2278         temp64.val = (u64) err_dma_handle;
2279         c->ErrDesc.Addr.lower = temp64.val32.lower;
2280         c->ErrDesc.Addr.upper = temp64.val32.upper;
2281         c->ErrDesc.Len = sizeof(*c->err_info);
2282
2283         c->h = h;
2284         return c;
2285 }
2286
2287 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2288 {
2289         int i;
2290
2291         i = c - h->cmd_pool;
2292         clear_bit(i & (BITS_PER_LONG - 1),
2293                   h->cmd_pool_bits + (i / BITS_PER_LONG));
2294         h->nr_frees++;
2295 }
2296
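/* Free a command obtained from cmd_special_alloc(); the DMA handles needed
 * by pci_free_consistent() are recovered from the ErrDesc address and
 * busaddr stashed in the command itself.
 */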
2297 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2298 {
2299         union u64bit temp64;
2300
2301         temp64.val32.lower = c->ErrDesc.Addr.lower;
2302         temp64.val32.upper = c->ErrDesc.Addr.upper;
2303         pci_free_consistent(h->pdev, sizeof(*c->err_info),
2304                             c->err_info, (dma_addr_t) temp64.val);
2305         pci_free_consistent(h->pdev, sizeof(*c),
2306                             c, (dma_addr_t) c->busaddr);
2307 }
2308
2309 #ifdef CONFIG_COMPAT
2310
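/* Translate a 32-bit CCISS_PASSTHRU: copy each field into a native-sized
 * struct placed on the user stack via compat_alloc_user_space(), convert
 * the 32-bit buffer pointer with compat_ptr(), then reuse the regular
 * ioctl path and copy the error info back out.
 */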
2311 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2312 {
2313         IOCTL32_Command_struct __user *arg32 =
2314             (IOCTL32_Command_struct __user *) arg;
2315         IOCTL_Command_struct arg64;
2316         IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2317         int err;
2318         u32 cp;
2319
2320         err = 0;
2321         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2322                            sizeof(arg64.LUN_info));
2323         err |= copy_from_user(&arg64.Request, &arg32->Request,
2324                            sizeof(arg64.Request));
2325         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2326                            sizeof(arg64.error_info));
2327         err |= get_user(arg64.buf_size, &arg32->buf_size);
2328         err |= get_user(cp, &arg32->buf);
2329         arg64.buf = compat_ptr(cp);
2330         err |= copy_to_user(p, &arg64, sizeof(arg64));
2331
2332         if (err)
2333                 return -EFAULT;
2334
2335         err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2336         if (err)
2337                 return err;
2338         err |= copy_in_user(&arg32->error_info, &p->error_info,
2339                          sizeof(arg32->error_info));
2340         if (err)
2341                 return -EFAULT;
2342         return err;
2343 }
2344
2345 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2346         int cmd, void *arg)
2347 {
2348         BIG_IOCTL32_Command_struct __user *arg32 =
2349             (BIG_IOCTL32_Command_struct __user *) arg;
2350         BIG_IOCTL_Command_struct arg64;
2351         BIG_IOCTL_Command_struct __user *p =
2352             compat_alloc_user_space(sizeof(arg64));
2353         int err;
2354         u32 cp;
2355
2356         err = 0;
2357         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2358                            sizeof(arg64.LUN_info));
2359         err |= copy_from_user(&arg64.Request, &arg32->Request,
2360                            sizeof(arg64.Request));
2361         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2362                            sizeof(arg64.error_info));
2363         err |= get_user(arg64.buf_size, &arg32->buf_size);
2364         err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2365         err |= get_user(cp, &arg32->buf);
2366         arg64.buf = compat_ptr(cp);
2367         err |= copy_to_user(p, &arg64, sizeof(arg64));
2368
2369         if (err)
2370                 return -EFAULT;
2371
2372         err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2373         if (err)
2374                 return err;
2375         err |= copy_in_user(&arg32->error_info, &p->error_info,
2376                          sizeof(arg32->error_info));
2377         if (err)
2378                 return -EFAULT;
2379         return err;
2380 }
2381
2382 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2383 {
2384         switch (cmd) {
2385         case CCISS_GETPCIINFO:
2386         case CCISS_GETINTINFO:
2387         case CCISS_SETINTINFO:
2388         case CCISS_GETNODENAME:
2389         case CCISS_SETNODENAME:
2390         case CCISS_GETHEARTBEAT:
2391         case CCISS_GETBUSTYPES:
2392         case CCISS_GETFIRMVER:
2393         case CCISS_GETDRIVVER:
2394         case CCISS_REVALIDVOLS:
2395         case CCISS_DEREGDISK:
2396         case CCISS_REGNEWDISK:
2397         case CCISS_REGNEWD:
2398         case CCISS_RESCANDISK:
2399         case CCISS_GETLUNINFO:
2400                 return hpsa_ioctl(dev, cmd, arg);
2401
2402         case CCISS_PASSTHRU32:
2403                 return hpsa_ioctl32_passthru(dev, cmd, arg);
2404         case CCISS_BIG_PASSTHRU32:
2405                 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2406
2407         default:
2408                 return -ENOIOCTLCMD;
2409         }
2410 }
2411 #endif
2412
2413 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2414 {
2415         struct hpsa_pci_info pciinfo;
2416
2417         if (!argp)
2418                 return -EINVAL;
2419         pciinfo.domain = pci_domain_nr(h->pdev->bus);
2420         pciinfo.bus = h->pdev->bus->number;
2421         pciinfo.dev_fn = h->pdev->devfn;
2422         pciinfo.board_id = h->board_id;
2423         if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2424                 return -EFAULT;
2425         return 0;
2426 }
2427
2428 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2429 {
2430         DriverVer_type DriverVer;
2431         unsigned char vmaj, vmin, vsubmin;
2432         int rc;
2433
2434         rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2435                 &vmaj, &vmin, &vsubmin);
2436         if (rc != 3) {
2437                 dev_info(&h->pdev->dev, "driver version string '%s' "
2438                         "unrecognized.\n", HPSA_DRIVER_VERSION);
2439                 vmaj = 0;
2440                 vmin = 0;
2441                 vsubmin = 0;
2442         }
2443         DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2444         if (!argp)
2445                 return -EINVAL;
2446         if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2447                 return -EFAULT;
2448         return 0;
2449 }
2450
2451 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2452 {
2453         IOCTL_Command_struct iocommand;
2454         struct CommandList *c;
2455         char *buff = NULL;
2456         union u64bit temp64;
2457
2458         if (!argp)
2459                 return -EINVAL;
2460         if (!capable(CAP_SYS_RAWIO))
2461                 return -EPERM;
2462         if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2463                 return -EFAULT;
2464         if ((iocommand.buf_size < 1) &&
2465             (iocommand.Request.Type.Direction != XFER_NONE)) {
2466                 return -EINVAL;
2467         }
2468         if (iocommand.buf_size > 0) {
2469                 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2470                 if (buff == NULL)
2471                         return -ENOMEM;
2472         }
2473         if (iocommand.Request.Type.Direction == XFER_WRITE) {
2474                 /* Copy the data into the buffer we created */
2475                 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
2476                         kfree(buff);
2477                         return -EFAULT;
2478                 }
2479         } else
2480                 memset(buff, 0, iocommand.buf_size);
2481         c = cmd_special_alloc(h);
2482         if (c == NULL) {
2483                 kfree(buff);
2484                 return -ENOMEM;
2485         }
2486         /* Fill in the command type */
2487         c->cmd_type = CMD_IOCTL_PEND;
2488         /* Fill in Command Header */
2489         c->Header.ReplyQueue = 0; /* unused in simple mode */
2490         if (iocommand.buf_size > 0) {   /* buffer to fill */
2491                 c->Header.SGList = 1;
2492                 c->Header.SGTotal = 1;
2493         } else  { /* no buffers to fill */
2494                 c->Header.SGList = 0;
2495                 c->Header.SGTotal = 0;
2496         }
2497         memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2498         /* use the kernel address of the cmd block for tag */
2499         c->Header.Tag.lower = c->busaddr;
2500
2501         /* Fill in Request block */
2502         memcpy(&c->Request, &iocommand.Request,
2503                 sizeof(c->Request));
2504
2505         /* Fill in the scatter gather information */
2506         if (iocommand.buf_size > 0) {
2507                 temp64.val = pci_map_single(h->pdev, buff,
2508                         iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2509                 c->SG[0].Addr.lower = temp64.val32.lower;
2510                 c->SG[0].Addr.upper = temp64.val32.upper;
2511                 c->SG[0].Len = iocommand.buf_size;
2512                 c->SG[0].Ext = 0; /* we are not chaining*/
2513         }
2514         hpsa_scsi_do_simple_cmd_core(h, c);
2515         hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2516         check_ioctl_unit_attention(h, c);
2517
2518         /* Copy the error information out */
2519         memcpy(&iocommand.error_info, c->err_info,
2520                 sizeof(iocommand.error_info));
2521         if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2522                 kfree(buff);
2523                 cmd_special_free(h, c);
2524                 return -EFAULT;
2525         }
2526
2527         if (iocommand.Request.Type.Direction == XFER_READ) {
2528                 /* Copy the data out of the buffer we created */
2529                 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2530                         kfree(buff);
2531                         cmd_special_free(h, c);
2532                         return -EFAULT;
2533                 }
2534         }
2535         kfree(buff);
2536         cmd_special_free(h, c);
2537         return 0;
2538 }
2539
2540 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2541 {
2542         BIG_IOCTL_Command_struct *ioc;
2543         struct CommandList *c;
2544         unsigned char **buff = NULL;
2545         int *buff_size = NULL;
2546         union u64bit temp64;
2547         BYTE sg_used = 0;
2548         int status = 0;
2549         int i;
2550         u32 left;
2551         u32 sz;
2552         BYTE __user *data_ptr;
2553
2554         if (!argp)
2555                 return -EINVAL;
2556         if (!capable(CAP_SYS_RAWIO))
2557                 return -EPERM;
2558         ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
2560         if (!ioc) {
2561                 status = -ENOMEM;
2562                 goto cleanup1;
2563         }
2564         if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2565                 status = -EFAULT;
2566                 goto cleanup1;
2567         }
2568         if ((ioc->buf_size < 1) &&
2569             (ioc->Request.Type.Direction != XFER_NONE)) {
2570                 status = -EINVAL;
2571                 goto cleanup1;
2572         }
2573         /* Check kmalloc limits  using all SGs */
2574         if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2575                 status = -EINVAL;
2576                 goto cleanup1;
2577         }
2578         if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2579                 status = -EINVAL;
2580                 goto cleanup1;
2581         }
2582         buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2583         if (!buff) {
2584                 status = -ENOMEM;
2585                 goto cleanup1;
2586         }
2587         buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2588         if (!buff_size) {
2589                 status = -ENOMEM;
2590                 goto cleanup1;
2591         }
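        /* Split the transfer into malloc_size-sized kernel buffers, one per
         * SG entry: e.g. a 48 KB buf_size with a 16 KB malloc_size yields
         * three chunks (bounded by the MAXSGENTRIES check above).
         */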
2592         left = ioc->buf_size;
2593         data_ptr = ioc->buf;
2594         while (left) {
2595                 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2596                 buff_size[sg_used] = sz;
2597                 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2598                 if (buff[sg_used] == NULL) {
2599                         status = -ENOMEM;
2600                         goto cleanup1;
2601                 }
2602                 if (ioc->Request.Type.Direction == XFER_WRITE) {
2603                         if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2604                                 status = -EFAULT;
2605                                 goto cleanup1;
2606                         }
2607                 } else
2608                         memset(buff[sg_used], 0, sz);
2609                 left -= sz;
2610                 data_ptr += sz;
2611                 sg_used++;
2612         }
2613         c = cmd_special_alloc(h);
2614         if (c == NULL) {
2615                 status = -ENOMEM;
2616                 goto cleanup1;
2617         }
2618         c->cmd_type = CMD_IOCTL_PEND;
2619         c->Header.ReplyQueue = 0;
2620
2621         if (ioc->buf_size > 0) {
2622                 c->Header.SGList = sg_used;
2623                 c->Header.SGTotal = sg_used;
2624         } else {
2625                 c->Header.SGList = 0;
2626                 c->Header.SGTotal = 0;
2627         }
2628         memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2629         c->Header.Tag.lower = c->busaddr;
2630         memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2631         if (ioc->buf_size > 0) {
2633                 for (i = 0; i < sg_used; i++) {
2634                         temp64.val = pci_map_single(h->pdev, buff[i],
2635                                     buff_size[i], PCI_DMA_BIDIRECTIONAL);
2636                         c->SG[i].Addr.lower = temp64.val32.lower;
2637                         c->SG[i].Addr.upper = temp64.val32.upper;
2638                         c->SG[i].Len = buff_size[i];
2639                         /* we are not chaining */
2640                         c->SG[i].Ext = 0;
2641                 }
2642         }
2643         hpsa_scsi_do_simple_cmd_core(h, c);
2644         hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2645         check_ioctl_unit_attention(h, c);
2646         /* Copy the error information out */
2647         memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2648         if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2649                 cmd_special_free(h, c);
2650                 status = -EFAULT;
2651                 goto cleanup1;
2652         }
2653         if (ioc->Request.Type.Direction == XFER_READ) {
2654                 /* Copy the data out of the buffer we created */
2655                 BYTE __user *ptr = ioc->buf;
2656                 for (i = 0; i < sg_used; i++) {
2657                         if (copy_to_user(ptr, buff[i], buff_size[i])) {
2658                                 cmd_special_free(h, c);
2659                                 status = -EFAULT;
2660                                 goto cleanup1;
2661                         }
2662                         ptr += buff_size[i];
2663                 }
2664         }
2665         cmd_special_free(h, c);
2666         status = 0;
2667 cleanup1:
2668         if (buff) {
2669                 for (i = 0; i < sg_used; i++)
2670                         kfree(buff[i]);
2671                 kfree(buff);
2672         }
2673         kfree(buff_size);
2674         kfree(ioc);
2675         return status;
2676 }
2677
2678 static void check_ioctl_unit_attention(struct ctlr_info *h,
2679         struct CommandList *c)
2680 {
2681         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2682                         c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2683                 (void) check_for_unit_attention(h, c);
2684 }

2685 /*
2686  * ioctl
2687  */
2688 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2689 {
2690         struct ctlr_info *h;
2691         void __user *argp = (void __user *)arg;
2692
2693         h = sdev_to_hba(dev);
2694
2695         switch (cmd) {
2696         case CCISS_DEREGDISK:
2697         case CCISS_REGNEWDISK:
2698         case CCISS_REGNEWD:
2699                 hpsa_scan_start(h->scsi_host);
2700                 return 0;
2701         case CCISS_GETPCIINFO:
2702                 return hpsa_getpciinfo_ioctl(h, argp);
2703         case CCISS_GETDRIVVER:
2704                 return hpsa_getdrivver_ioctl(h, argp);
2705         case CCISS_PASSTHRU:
2706                 return hpsa_passthru_ioctl(h, argp);
2707         case CCISS_BIG_PASSTHRU:
2708                 return hpsa_big_passthru_ioctl(h, argp);
2709         default:
2710                 return -ENOTTY;
2711         }
2712 }
2713
2714 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2715         void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2716         int cmd_type)
2717 {
2718         int pci_dir = XFER_NONE;
2719
2720         c->cmd_type = CMD_IOCTL_PEND;
2721         c->Header.ReplyQueue = 0;
2722         if (buff != NULL && size > 0) {
2723                 c->Header.SGList = 1;
2724                 c->Header.SGTotal = 1;
2725         } else {
2726                 c->Header.SGList = 0;
2727                 c->Header.SGTotal = 0;
2728         }
2729         c->Header.Tag.lower = c->busaddr;
2730         memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2731
2732         c->Request.Type.Type = cmd_type;
2733         if (cmd_type == TYPE_CMD) {
2734                 switch (cmd) {
2735                 case HPSA_INQUIRY:
2736                         /* are we trying to read a vital product page */
2737                         if (page_code != 0) {
2738                                 c->Request.CDB[1] = 0x01;
2739                                 c->Request.CDB[2] = page_code;
2740                         }
2741                         c->Request.CDBLen = 6;
2742                         c->Request.Type.Attribute = ATTR_SIMPLE;
2743                         c->Request.Type.Direction = XFER_READ;
2744                         c->Request.Timeout = 0;
2745                         c->Request.CDB[0] = HPSA_INQUIRY;
2746                         c->Request.CDB[4] = size & 0xFF;
2747                         break;
2748                 case HPSA_REPORT_LOG:
2749                 case HPSA_REPORT_PHYS:
2750                         /* Talking to the controller, so it's a physical
2751                          * command: mode = 00, target = 0.  Nothing to write.
2752                          */
2753                         c->Request.CDBLen = 12;
2754                         c->Request.Type.Attribute = ATTR_SIMPLE;
2755                         c->Request.Type.Direction = XFER_READ;
2756                         c->Request.Timeout = 0;
2757                         c->Request.CDB[0] = cmd;
2758                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2759                         c->Request.CDB[7] = (size >> 16) & 0xFF;
2760                         c->Request.CDB[8] = (size >> 8) & 0xFF;
2761                         c->Request.CDB[9] = size & 0xFF;
2762                         break;
2763
2764                 case HPSA_READ_CAPACITY:
2765                         c->Request.CDBLen = 10;
2766                         c->Request.Type.Attribute = ATTR_SIMPLE;
2767                         c->Request.Type.Direction = XFER_READ;
2768                         c->Request.Timeout = 0;
2769                         c->Request.CDB[0] = cmd;
2770                         break;
2771                 case HPSA_CACHE_FLUSH:
2772                         c->Request.CDBLen = 12;
2773                         c->Request.Type.Attribute = ATTR_SIMPLE;
2774                         c->Request.Type.Direction = XFER_WRITE;
2775                         c->Request.Timeout = 0;
2776                         c->Request.CDB[0] = BMIC_WRITE;
2777                         c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2778                         break;
2779                 case TEST_UNIT_READY:
2780                         c->Request.CDBLen = 6;
2781                         c->Request.Type.Attribute = ATTR_SIMPLE;
2782                         c->Request.Type.Direction = XFER_NONE;
2783                         c->Request.Timeout = 0;
2784                         break;
2785                 default:
2786                         dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2787                         BUG();
2788                         return;
2789                 }
2790         } else if (cmd_type == TYPE_MSG) {
2791                 switch (cmd) {
2792
2793                 case  HPSA_DEVICE_RESET_MSG:
2794                         c->Request.CDBLen = 16;
2795                         c->Request.Type.Type = TYPE_MSG; /* a message, not a command */
2796                         c->Request.Type.Attribute = ATTR_SIMPLE;
2797                         c->Request.Type.Direction = XFER_NONE;
2798                         c->Request.Timeout = 0; /* Don't time out */
2799                         c->Request.CDB[0] =  0x01; /* RESET_MSG is 0x01 */
2800                         c->Request.CDB[1] = 0x03;  /* Reset target above */
2801                         /* If bytes 4-7 are zero, it means reset the */
2802                         /* LunID device */
2803                         c->Request.CDB[4] = 0x00;
2804                         c->Request.CDB[5] = 0x00;
2805                         c->Request.CDB[6] = 0x00;
2806                         c->Request.CDB[7] = 0x00;
2807                 break;
2808
2809                 default:
2810                         dev_warn(&h->pdev->dev, "unknown message type %d\n",
2811                                 cmd);
2812                         BUG();
2813                 }
2814         } else {
2815                 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2816                 BUG();
2817         }
2818
2819         switch (c->Request.Type.Direction) {
2820         case XFER_READ:
2821                 pci_dir = PCI_DMA_FROMDEVICE;
2822                 break;
2823         case XFER_WRITE:
2824                 pci_dir = PCI_DMA_TODEVICE;
2825                 break;
2826         case XFER_NONE:
2827                 pci_dir = PCI_DMA_NONE;
2828                 break;
2829         default:
2830                 pci_dir = PCI_DMA_BIDIRECTIONAL;
2831         }
2832
2833         hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2834
2835         return;
2836 }
2837
2838 /*
2839  * Map (physical) PCI mem into (virtual) kernel space
2840  */
2841 static void __iomem *remap_pci_mem(ulong base, ulong size)
2842 {
2843         ulong page_base = ((ulong) base) & PAGE_MASK;
2844         ulong page_offs = ((ulong) base) - page_base;
2845         void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2846
2847         return page_remapped ? (page_remapped + page_offs) : NULL;
2848 }
2849
2850 /* Takes cmds off the submission queue and sends them to the hardware,
2851  * then puts them on the queue of cmds waiting for completion.
2852  */
2853 static void start_io(struct ctlr_info *h)
2854 {
2855         struct CommandList *c;
2856
2857         while (!hlist_empty(&h->reqQ)) {
2858                 c = hlist_entry(h->reqQ.first, struct CommandList, list);
2859                 /* can't do anything if fifo is full */
2860                 if ((h->access.fifo_full(h))) {
2861                         dev_warn(&h->pdev->dev, "fifo full\n");
2862                         break;
2863                 }
2864
2865                 /* Get the first entry from the Request Q */
2866                 removeQ(c);
2867                 h->Qdepth--;
2868
2869                 /* Tell the controller to execute the command */
2870                 h->access.submit_command(h, c);
2871
2872                 /* Put job onto the completed Q */
2873                 addQ(&h->cmpQ, c);
2874         }
2875 }
2876
2877 static inline unsigned long get_next_completion(struct ctlr_info *h)
2878 {
2879         return h->access.command_completed(h);
2880 }
2881
2882 static inline bool interrupt_pending(struct ctlr_info *h)
2883 {
2884         return h->access.intr_pending(h);
2885 }
2886
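/* With legacy INTx the line may be shared, so an interrupt is ours only if
 * the controller has one pending and interrupts are enabled; MSI and MSI-X
 * vectors are never shared, so those are always for us.
 */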
2887 static inline long interrupt_not_for_us(struct ctlr_info *h)
2888 {
2889         return !(h->msi_vector || h->msix_vector) &&
2890                 ((h->access.intr_pending(h) == 0) ||
2891                 (h->interrupts_enabled == 0));
2892 }
2893
2894 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
2895         u32 raw_tag)
2896 {
2897         if (unlikely(tag_index >= h->nr_cmds)) {
2898                 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2899                 return 1;
2900         }
2901         return 0;
2902 }
2903
2904 static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
2905 {
2906         removeQ(c);
2907         if (likely(c->cmd_type == CMD_SCSI))
2908                 complete_scsi_command(c, 0, raw_tag);
2909         else if (c->cmd_type == CMD_IOCTL_PEND)
2910                 complete(c->waiting);
2911 }
2912
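/* Completion tags encode several things: bit 4 says whether the tag
 * directly encodes the command's index in the command pool (bits 5 and
 * up), and the low two bits are error flags.  The helpers below extract
 * or discard those fields.
 */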
2913 static inline u32 hpsa_tag_contains_index(u32 tag)
2914 {
2915 #define DIRECT_LOOKUP_BIT 0x10
2916         return tag & DIRECT_LOOKUP_BIT;
2917 }
2918
2919 static inline u32 hpsa_tag_to_index(u32 tag)
2920 {
2921 #define DIRECT_LOOKUP_SHIFT 5
2922         return tag >> DIRECT_LOOKUP_SHIFT;
2923 }
2924
2925 static inline u32 hpsa_tag_discard_error_bits(u32 tag)
2926 {
2927 #define HPSA_ERROR_BITS 0x03
2928         return tag & ~HPSA_ERROR_BITS;
2929 }
2930
2931 /* process completion of an indexed ("direct lookup") command */
2932 static inline u32 process_indexed_cmd(struct ctlr_info *h,
2933         u32 raw_tag)
2934 {
2935         u32 tag_index;
2936         struct CommandList *c;
2937
2938         tag_index = hpsa_tag_to_index(raw_tag);
2939         if (bad_tag(h, tag_index, raw_tag))
2940                 return next_command(h);
2941         c = h->cmd_pool + tag_index;
2942         finish_cmd(c, raw_tag);
2943         return next_command(h);
2944 }
2945
2946 /* process completion of a non-indexed command */
2947 static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2948         u32 raw_tag)
2949 {
2950         u32 tag;
2951         struct CommandList *c = NULL;
2952         struct hlist_node *tmp;
2953
2954         tag = hpsa_tag_discard_error_bits(raw_tag);
2955         hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2956                 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
2957                         finish_cmd(c, raw_tag);
2958                         return next_command(h);
2959                 }
2960         }
2961         bad_tag(h, h->nr_cmds + 1, raw_tag);
2962         return next_command(h);
2963 }
2964
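/* The interrupt handler: under the lock, drain the controller's completion
 * FIFO, finishing each completed command by tag until the FIFO is empty.
 */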
2965 static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2966 {
2967         struct ctlr_info *h = dev_id;
2968         unsigned long flags;
2969         u32 raw_tag;
2970
2971         if (interrupt_not_for_us(h))
2972                 return IRQ_NONE;
2973         spin_lock_irqsave(&h->lock, flags);
2974         raw_tag = get_next_completion(h);
2975         while (raw_tag != FIFO_EMPTY) {
2976                 if (hpsa_tag_contains_index(raw_tag))
2977                         raw_tag = process_indexed_cmd(h, raw_tag);
2978                 else
2979                         raw_tag = process_nonindexed_cmd(h, raw_tag);
2980         }
2981         spin_unlock_irqrestore(&h->lock, flags);
2982         return IRQ_HANDLED;
2983 }
2984
2985 /* Send a message CDB to the firmware. */
2986 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2987                                                 unsigned char type)
2988 {
2989         struct Command {
2990                 struct CommandListHeader CommandHeader;
2991                 struct RequestBlock Request;
2992                 struct ErrDescriptor ErrorDescriptor;
2993         };
2994         struct Command *cmd;
2995         static const size_t cmd_sz = sizeof(*cmd) +
2996                                         sizeof(struct ErrorInfo);
2997         dma_addr_t paddr64;
2998         uint32_t paddr32, tag;
2999         void __iomem *vaddr;
3000         int i, err;
3001
3002         vaddr = pci_ioremap_bar(pdev, 0);
3003         if (vaddr == NULL)
3004                 return -ENOMEM;
3005
3006         /* The Inbound Post Queue only accepts 32-bit physical addresses for the
3007          * CCISS commands, so they must be allocated from the lower 4GiB of
3008          * memory.
3009          */
3010         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3011         if (err) {
3012                 iounmap(vaddr);
3013                 return -ENOMEM;
3014         }
3015
3016         cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3017         if (cmd == NULL) {
3018                 iounmap(vaddr);
3019                 return -ENOMEM;
3020         }
3021
3022         /* This must fit, because of the 32-bit consistent DMA mask.  Also,
3023          * although there's no guarantee, we assume that the address is at
3024          * least 4-byte aligned (most likely, it's page-aligned).
3025          */
3026         paddr32 = paddr64;
3027
3028         cmd->CommandHeader.ReplyQueue = 0;
3029         cmd->CommandHeader.SGList = 0;
3030         cmd->CommandHeader.SGTotal = 0;
3031         cmd->CommandHeader.Tag.lower = paddr32;
3032         cmd->CommandHeader.Tag.upper = 0;
3033         memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3034
3035         cmd->Request.CDBLen = 16;
3036         cmd->Request.Type.Type = TYPE_MSG;
3037         cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3038         cmd->Request.Type.Direction = XFER_NONE;
3039         cmd->Request.Timeout = 0; /* Don't time out */
3040         cmd->Request.CDB[0] = opcode;
3041         cmd->Request.CDB[1] = type;
3042         memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
3043         cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3044         cmd->ErrorDescriptor.Addr.upper = 0;
3045         cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3046
3047         writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3048
3049         for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3050                 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3051                 if (hpsa_tag_discard_error_bits(tag) == paddr32)
3052                         break;
3053                 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3054         }
3055
3056         iounmap(vaddr);
3057
3058         /* we leak the DMA buffer here ... no choice since the controller could
3059          *  still complete the command.
3060          */
3061         if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3062                 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3063                         opcode, type);
3064                 return -ETIMEDOUT;
3065         }
3066
3067         pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3068
3069         if (tag & HPSA_ERROR_BIT) {
3070                 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3071                         opcode, type);
3072                 return -EIO;
3073         }
3074
3075         dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3076                 opcode, type);
3077         return 0;
3078 }
3079
3080 #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
3081 #define hpsa_noop(p) hpsa_message(p, 3, 0)
3082
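/* Force the MSI and MSI-X enable bits off directly in PCI config space.
 * This is used after a hard reset (e.g. on the kexec path), where the
 * kernel's usual MSI bookkeeping can be out of sync with the hardware.
 */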
3083 static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
3084 {
3085 /* the #defines are stolen from drivers/pci/msi.h. */
3086 #define msi_control_reg(base)           (base + PCI_MSI_FLAGS)
3087 #define PCI_MSIX_FLAGS_ENABLE           (1 << 15)
3088
3089         int pos;
3090         u16 control = 0;
3091
3092         pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
3093         if (pos) {
3094                 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3095                 if (control & PCI_MSI_FLAGS_ENABLE) {
3096                         dev_info(&pdev->dev, "resetting MSI\n");
3097                         pci_write_config_word(pdev, msi_control_reg(pos),
3098                                         control & ~PCI_MSI_FLAGS_ENABLE);
3099                 }
3100         }
3101
3102         pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3103         if (pos) {
3104                 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3105                 if (control & PCI_MSIX_FLAGS_ENABLE) {
3106                         dev_info(&pdev->dev, "resetting MSI-X\n");
3107                         pci_write_config_word(pdev, msi_control_reg(pos),
3108                                         control & ~PCI_MSIX_FLAGS_ENABLE);
3109                 }
3110         }
3111
3112         return 0;
3113 }
3114
3115 /* This does a hard reset of the controller using PCI power management
3116  * states.
3117  */
3118 static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
3119 {
3120         u16 pmcsr, saved_config_space[32];
3121         int i, pos;
3122
3123         dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3124
3125         /* This is very nearly the same thing as
3126          *
3127          * pci_save_state(pci_dev);
3128          * pci_set_power_state(pci_dev, PCI_D3hot);
3129          * pci_set_power_state(pci_dev, PCI_D0);
3130          * pci_restore_state(pci_dev);
3131          *
3132          * but we can't use these nice canned kernel routines on
3133          * kexec, because they also check the MSI/MSI-X state in PCI
3134          * configuration space and do the wrong thing when it is
3135          * set/cleared.  Also, the pci_save/restore_state functions
3136          * violate the ordering requirements for restoring the
3137          * configuration space from the CCISS document (see the
3138          * comment below).  So we roll our own ....
3139          */
3140
3141         for (i = 0; i < 32; i++)
3142                 pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
3143
3144         pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3145         if (pos == 0) {
3146                 dev_err(&pdev->dev,
3147                         "hpsa_hard_reset_controller: PCI PM not supported\n");
3148                 return -ENODEV;
3149         }
3150
3151         /* Quoting from the Open CISS Specification: "The Power
3152          * Management Control/Status Register (CSR) controls the power
3153          * state of the device.  The normal operating state is D0,
3154          * CSR=00h.  The software off state is D3, CSR=03h.  To reset
3155          * the controller, place the interface device in D3 then to
3156          * D0, this causes a secondary PCI reset which will reset the
3157          * controller."
3158          */
3159
3160         /* enter the D3hot power management state */
3161         pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3162         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3163         pmcsr |= PCI_D3hot;
3164         pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3165
3166         msleep(500);
3167
3168         /* enter the D0 power management state */
3169         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3170         pmcsr |= PCI_D0;
3171         pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3172
3173         msleep(500);
3174
3175         /* Restore the PCI configuration space.  The Open CISS
3176          * Specification says, "Restore the PCI Configuration
3177          * Registers, offsets 00h through 60h. It is important to
3178          * restore the command register, 16-bits at offset 04h,
3179          * last. Do not restore the configuration status register,
3180          * 16-bits at offset 06h."  Note that the offset is 2*i.
3181          */
3182         for (i = 0; i < 32; i++) {
3183                 if (i == 2 || i == 3)
3184                         continue;
3185                 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
3186         }
3187         wmb();
3188         pci_write_config_word(pdev, PCI_COMMAND, saved_config_space[2]);
3189
3190         return 0;
3191 }
3192
3193 /*
3194  *  We cannot read the structure directly, for portability we must use
3195  *   the io functions.
3196  *   This is for debug only.
3197  */
3198 #ifdef HPSA_DEBUG
3199 static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3200 {
3201         int i;
3202         char temp_name[17];
3203
3204         dev_info(dev, "Controller Configuration information\n");
3205         dev_info(dev, "------------------------------------\n");
3206         for (i = 0; i < 4; i++)
3207                 temp_name[i] = readb(&(tb->Signature[i]));
3208         temp_name[4] = '\0';
3209         dev_info(dev, "   Signature = %s\n", temp_name);
3210         dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
3211         dev_info(dev, "   Transport methods supported = 0x%x\n",
3212                readl(&(tb->TransportSupport)));
3213         dev_info(dev, "   Transport methods active = 0x%x\n",
3214                readl(&(tb->TransportActive)));
3215         dev_info(dev, "   Requested transport Method = 0x%x\n",
3216                readl(&(tb->HostWrite.TransportRequest)));
3217         dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
3218                readl(&(tb->HostWrite.CoalIntDelay)));
3219         dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
3220                readl(&(tb->HostWrite.CoalIntCount)));
3221         dev_info(dev, "   Max outstanding commands = %d\n",
3222                readl(&(tb->CmdsOutMax)));
3223         dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3224         for (i = 0; i < 16; i++)
3225                 temp_name[i] = readb(&(tb->ServerName[i]));
3226         temp_name[16] = '\0';
3227         dev_info(dev, "   Server Name = %s\n", temp_name);
3228         dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
3229                 readl(&(tb->HeartBeat)));
3230 }
3231 #endif                          /* HPSA_DEBUG */
3232
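/* Translate a base-address-register offset in PCI config space (as read
 * from the controller's config registers) into the corresponding PCI
 * resource index, accounting for I/O, 32-bit and 64-bit memory BARs.
 */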
3233 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3234 {
3235         int i, offset, mem_type, bar_type;
3236
3237         if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3238                 return 0;
3239         offset = 0;
3240         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3241                 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3242                 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3243                         offset += 4;
3244                 else {
3245                         mem_type = pci_resource_flags(pdev, i) &
3246                             PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3247                         switch (mem_type) {
3248                         case PCI_BASE_ADDRESS_MEM_TYPE_32:
3249                         case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3250                                 offset += 4;    /* 32 bit */
3251                                 break;
3252                         case PCI_BASE_ADDRESS_MEM_TYPE_64:
3253                                 offset += 8;
3254                                 break;
3255                         default:        /* reserved in PCI 2.2 */
3256                                 dev_warn(&pdev->dev,
3257                                        "base address is invalid\n");
3258                                 return -1;
3260                         }
3261                 }
3262                 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3263                         return i + 1;
3264         }
3265         return -1;
3266 }
3267
3268 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3269  * controllers that are capable. If not, we use IO-APIC mode.
3270  */
3271
3272 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
3273                                            struct pci_dev *pdev, u32 board_id)
3274 {
3275 #ifdef CONFIG_PCI_MSI
3276         int err;
3277         struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3278         {0, 2}, {0, 3}
3279         };
3280
3281         /* Some boards advertise MSI but don't really support it */
3282         if ((board_id == 0x40700E11) ||
3283             (board_id == 0x40800E11) ||
3284             (board_id == 0x40820E11) || (board_id == 0x40830E11))
3285                 goto default_int_mode;
3286         if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
3287                 dev_info(&pdev->dev, "MSI-X capability present\n");
3288                 err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
3289                 if (!err) {
3290                         h->intr[0] = hpsa_msix_entries[0].vector;
3291                         h->intr[1] = hpsa_msix_entries[1].vector;
3292                         h->intr[2] = hpsa_msix_entries[2].vector;
3293                         h->intr[3] = hpsa_msix_entries[3].vector;
3294                         h->msix_vector = 1;
3295                         return;
3296                 }
3297                 if (err > 0) {
3298                         dev_warn(&pdev->dev, "only %d MSI-X vectors "
3299                                "available\n", err);
3300                         goto default_int_mode;
3301                 } else {
3302                         dev_warn(&pdev->dev, "MSI-X init failed %d\n",
3303                                err);
3304                         goto default_int_mode;
3305                 }
3306         }
3307         if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
3308                 dev_info(&pdev->dev, "MSI capability present\n");
3309                 if (!pci_enable_msi(pdev))
3310                         h->msi_vector = 1;
3311                 else
3312                         dev_warn(&pdev->dev, "MSI init failed\n");
3313         }
3314 default_int_mode:
3315 #endif                          /* CONFIG_PCI_MSI */
3316         /* if we get here we're going to use the default interrupt mode */
3317         h->intr[PERF_MODE_INT] = pdev->irq;
3318 }
3319
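/* One-time PCI setup: identify the board, enable the device and claim its
 * regions, pick an interrupt mode, map the registers and config table,
 * wait for the firmware to become ready, and put the controller into
 * simple transport mode.
 */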
3320 static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3321 {
3322         ushort subsystem_vendor_id, subsystem_device_id, command;
3323         u32 board_id, scratchpad = 0;
3324         u64 cfg_offset;
3325         u32 cfg_base_addr;
3326         u64 cfg_base_addr_index;
3327         u32 trans_offset;
3328         int i, prod_index, err;
3329
3330         subsystem_vendor_id = pdev->subsystem_vendor;
3331         subsystem_device_id = pdev->subsystem_device;
3332         board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) |
3333                     subsystem_vendor_id);
3334
3335         for (i = 0; i < ARRAY_SIZE(products); i++)
3336                 if (board_id == products[i].board_id)
3337                         break;
3338
3339         prod_index = i;
3340
3341         if (prod_index == ARRAY_SIZE(products)) {
3342                 prod_index--;
3343                 if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
3344                                 !hpsa_allow_any) {
3345                         dev_warn(&pdev->dev, "unrecognized board ID:"
3346                                 " 0x%08lx, ignoring.\n",
3347                                 (unsigned long) board_id);
3348                         return -ENODEV;
3349                 }
3350         }
3351         /* check to see if controller has been disabled
3352          * BEFORE trying to enable it
3353          */
3354         (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
3355         if (!(command & PCI_COMMAND_MEMORY)) {
3356                 dev_warn(&pdev->dev, "controller appears to be disabled\n");
3357                 return -ENODEV;
3358         }
3359
3360         err = pci_enable_device(pdev);
3361         if (err) {
3362                 dev_warn(&pdev->dev, "unable to enable PCI device\n");
3363                 return err;
3364         }
3365
3366         err = pci_request_regions(pdev, "hpsa");
3367         if (err) {
3368                 dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
3369                 return err;
3370         }
3371
3372         /* If the kernel supports MSI/MSI-X we will try to enable that,
3373          * else we use the IO-APIC interrupt assigned to us by system ROM.
3374          */
3375         hpsa_interrupt_mode(h, pdev, board_id);
3376
3377         /* find the memory BAR */
3378         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3379                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
3380                         break;
3381         }
3382         if (i == DEVICE_COUNT_RESOURCE) {
3383                 dev_warn(&pdev->dev, "no memory BAR found\n");
3384                 err = -ENODEV;
3385                 goto err_out_free_res;
3386         }
3387
3388         h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
3389                                                  * already removed
3390                                                  */
3391
3392         h->vaddr = remap_pci_mem(h->paddr, 0x250);
        if (!h->vaddr) {
                dev_warn(&pdev->dev, "cannot remap controller registers\n");
                err = -ENOMEM;
                goto err_out_free_res;
        }
3393
3394         /* Wait for the board to become ready.  */
3395         for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
3396                 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
3397                 if (scratchpad == HPSA_FIRMWARE_READY)
3398                         break;
3399                 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3400         }
3401         if (scratchpad != HPSA_FIRMWARE_READY) {
3402                 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3403                 err = -ENODEV;
3404                 goto err_out_free_res;
3405         }
3406
3407         /* get the address index number */
3408         cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
3409         cfg_base_addr &= (u32) 0x0000ffff;
3410         cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3411         if (cfg_base_addr_index == -1) {
3412                 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3413                 err = -ENODEV;
3414                 goto err_out_free_res;
3415         }
3416
3417         cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
3418         h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3419                                cfg_base_addr_index) + cfg_offset,
3420                                 sizeof(h->cfgtable));
3421         /* Find performant mode table. */
3422         trans_offset = readl(&(h->cfgtable->TransMethodOffset));
3423         h->transtable = remap_pci_mem(pci_resource_start(pdev,
3424                                 cfg_base_addr_index)+cfg_offset+trans_offset,
3425                                 sizeof(*h->transtable));
3426
3427         h->board_id = board_id;
3428         h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3429         h->product_name = products[prod_index].product_name;
3430         h->access = *(products[prod_index].access);
3431         /* Allow room for some ioctls */
3432         h->nr_cmds = h->max_commands - 4;
3433
3434         if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3435             (readb(&h->cfgtable->Signature[1]) != 'I') ||
3436             (readb(&h->cfgtable->Signature[2]) != 'S') ||
3437             (readb(&h->cfgtable->Signature[3]) != 'S')) {
3438                 dev_warn(&pdev->dev, "not a valid CISS config table\n");
3439                 err = -ENODEV;
3440                 goto err_out_free_res;
3441         }
3442 #ifdef CONFIG_X86
3443         {
3444                 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3445                 u32 prefetch;
3446                 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3447                 prefetch |= 0x100;
3448                 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3449         }
3450 #endif
3451
3452         /* Disabling DMA prefetch for the P600
3453          * An ASIC bug may result in a prefetch beyond
3454          * physical memory.
3455          */
3456         if (board_id == 0x3225103C) {
3457                 u32 dma_prefetch;
3458                 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3459                 dma_prefetch |= 0x8000;
3460                 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3461         }
3462
3463         h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3464         /* Update the field, and then ring the doorbell */
3465         writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3466         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3467
3468         /* under certain very rare conditions, this can take a while.
3469          * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3470          * as we enter this code.)
3471          */
3472         for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3473                 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3474                         break;
3475                 /* delay and try again */
3476                 msleep(10);
3477         }
3478
3479 #ifdef HPSA_DEBUG
3480         print_cfg_table(&pdev->dev, h->cfgtable);
3481 #endif                          /* HPSA_DEBUG */
3482
3483         if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3484                 dev_warn(&pdev->dev, "unable to get board into simple mode\n");
3485                 err = -ENODEV;
3486                 goto err_out_free_res;
3487         }
3488         return 0;
3489
3490 err_out_free_res:
3491         /*
3492          * Deliberately omit pci_disable_device(): it does something nasty to
3493          * Smart Array controllers that pci_enable_device does not undo
3494          */
3495         pci_release_regions(pdev);
3496         return err;
3497 }
3498
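/* Fetch and cache the controller's own standard INQUIRY data so it can be
 * consulted later without another trip to the hardware.
 */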
3499 static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
3500 {
3501         int rc;
3502
3503 #define HBA_INQUIRY_BYTE_COUNT 64
3504         h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
3505         if (!h->hba_inquiry_data)
3506                 return;
3507         rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
3508                 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
3509         if (rc != 0) {
3510                 kfree(h->hba_inquiry_data);
3511                 h->hba_inquiry_data = NULL;
3512         }
3513 }
3514
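/* The PCI probe entry point: optionally hard-reset the board first (for
 * the reset_devices/kdump case), then allocate and initialize the
 * per-controller state and hook the controller into the SCSI midlayer.
 */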
3515 static int __devinit hpsa_init_one(struct pci_dev *pdev,
3516                                     const struct pci_device_id *ent)
3517 {
3518         int i, rc;
3519         int dac;
3520         struct ctlr_info *h;
3521
3522         if (number_of_controllers == 0)
3523                 printk(KERN_INFO DRIVER_NAME "\n");
3524         if (reset_devices) {
3525                 /* Reset the controller with a PCI power-cycle */
3526                 if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
3527                         return -ENODEV;
3528
3529                 /* Some devices (notably the HP Smart Array 5i Controller)
3530                    need a little pause here */
3531                 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3532
3533                 /* Now try to get the controller to respond to a no-op */
3534                 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3535                         if (hpsa_noop(pdev) == 0)
3536                                 break;
3537                         else
3538                                 dev_warn(&pdev->dev, "no-op failed%s\n",
3539                                                 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
3540                 }
3541         }
3542
3543         /* Command structures must be aligned on a 32-byte boundary because
3544          * the 5 lower bits of the address are used by the hardware and by
3545          * the driver.  See comments in hpsa.h for more info.
3546          */
3547 #define COMMANDLIST_ALIGNMENT 32
3548         BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
3549         h = kzalloc(sizeof(*h), GFP_KERNEL);
3550         if (!h)
3551                 return -ENOMEM;
3552
3553         h->busy_initializing = 1;
3554         INIT_HLIST_HEAD(&h->cmpQ);
3555         INIT_HLIST_HEAD(&h->reqQ);
3556         mutex_init(&h->busy_shutting_down);
3557         init_completion(&h->scan_wait);
3558         rc = hpsa_pci_init(h, pdev);
3559         if (rc != 0)
3560                 goto clean1;
3561
3562         sprintf(h->devname, "hpsa%d", number_of_controllers);
3563         h->ctlr = number_of_controllers;
3564         number_of_controllers++;
3565         h->pdev = pdev;
3566
3567         /* configure PCI DMA stuff */
3568         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3569         if (rc == 0) {
3570                 dac = 1;
3571         } else {
3572                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3573                 if (rc == 0) {
3574                         dac = 0;
3575                 } else {
3576                         dev_err(&pdev->dev, "no suitable DMA available\n");
3577                         goto clean1;
3578                 }
3579         }
3580
3581         /* make sure the board interrupts are off */
3582         h->access.set_intr_mask(h, HPSA_INTR_OFF);
3583         rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr,
3584                         IRQF_DISABLED, h->devname, h);
3585         if (rc) {
3586                 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3587                        h->intr[PERF_MODE_INT], h->devname);
3588                 goto clean2;
3589         }
3590
3591         dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
3592                h->devname, pdev->device,
3593                h->intr[PERF_MODE_INT], dac ? "" : " not");
3594
3595         h->cmd_pool_bits =
3596             kmalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
3597                     sizeof(unsigned long), GFP_KERNEL);
3598         h->cmd_pool = pci_alloc_consistent(h->pdev,
3599                     h->nr_cmds * sizeof(*h->cmd_pool),
3600                     &(h->cmd_pool_dhandle));
3601         h->errinfo_pool = pci_alloc_consistent(h->pdev,
3602                     h->nr_cmds * sizeof(*h->errinfo_pool),
3603                     &(h->errinfo_pool_dhandle));
3604         if ((h->cmd_pool_bits == NULL)
3605             || (h->cmd_pool == NULL)
3606             || (h->errinfo_pool == NULL)) {
3607                 dev_err(&pdev->dev, "out of memory");
3608                 rc = -ENOMEM;
3609                 goto clean4;
3610         }
3611         spin_lock_init(&h->lock);
3612         spin_lock_init(&h->scan_lock);
3613         init_waitqueue_head(&h->scan_wait_queue);
3614         h->scan_finished = 1; /* no scan currently in progress */
3615
3616         pci_set_drvdata(pdev, h);
3617         memset(h->cmd_pool_bits, 0,
3618                DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
3619                        sizeof(unsigned long));
3620
3621         hpsa_scsi_setup(h);
3622
3623         /* Turn the interrupts on so we can service requests */
3624         h->access.set_intr_mask(h, HPSA_INTR_ON);
3625
3626         hpsa_put_ctlr_into_performant_mode(h);
3627         hpsa_hba_inquiry(h);
3628         hpsa_register_scsi(h);  /* hook ourselves into SCSI subsystem */
3629         h->busy_initializing = 0;
3630         return 0;
3631
3632 clean4:
3633         kfree(h->cmd_pool_bits);
3634         if (h->cmd_pool)
3635                 pci_free_consistent(h->pdev,
3636                             h->nr_cmds * sizeof(struct CommandList),
3637                             h->cmd_pool, h->cmd_pool_dhandle);
3638         if (h->errinfo_pool)
3639                 pci_free_consistent(h->pdev,
3640                             h->nr_cmds * sizeof(struct ErrorInfo),
3641                             h->errinfo_pool,
3642                             h->errinfo_pool_dhandle);
3643         free_irq(h->intr[PERF_MODE_INT], h);
3644 clean2:
3645 clean1:
3646         h->busy_initializing = 0;
3647         kfree(h);
3648         return rc;
3649 }
3650
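/* Tell the controller to write out its battery-backed cache, so that no
 * dirty data is left behind; called from the shutdown path.
 */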
3651 static void hpsa_flush_cache(struct ctlr_info *h)
3652 {
3653         char *flush_buf;
3654         struct CommandList *c;
3655
3656         flush_buf = kzalloc(4, GFP_KERNEL);
3657         if (!flush_buf)
3658                 return;
3659
3660         c = cmd_special_alloc(h);
3661         if (!c) {
3662                 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
3663                 goto out_of_memory;
3664         }
3665         fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
3666                 RAID_CTLR_LUNID, TYPE_CMD);
3667         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
3668         if (c->err_info->CommandStatus != 0)
3669                 dev_warn(&h->pdev->dev,
3670                         "error flushing cache on controller\n");
3671         cmd_special_free(h, c);
3672 out_of_memory:
3673         kfree(flush_buf);
3674 }
3675
3676 static void hpsa_shutdown(struct pci_dev *pdev)
3677 {
3678         struct ctlr_info *h;
3679
3680         h = pci_get_drvdata(pdev);
3681         /* Flush the cache so that all data in the battery-backed cache
3682          * is written out to the disks, then turn the board interrupts
3683          * off and free the irq.
3684          */
3685         hpsa_flush_cache(h);
3686         h->access.set_intr_mask(h, HPSA_INTR_OFF);
3687         free_irq(h->intr[PERF_MODE_INT], h);
3688 #ifdef CONFIG_PCI_MSI
3689         if (h->msix_vector)
3690                 pci_disable_msix(h->pdev);
3691         else if (h->msi_vector)
3692                 pci_disable_msi(h->pdev);
3693 #endif                          /* CONFIG_PCI_MSI */
3694 }
3695
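/* Undo everything hpsa_init_one() did: unhook from the SCSI midlayer,
 * flush the cache and free the irq via hpsa_shutdown(), then release the
 * DMA pools, mappings and PCI regions.
 */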
3696 static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3697 {
3698         struct ctlr_info *h;
3699
3700         if (pci_get_drvdata(pdev) == NULL) {
3701                 dev_err(&pdev->dev, "unable to remove device\n");
3702                 return;
3703         }
3704         h = pci_get_drvdata(pdev);
3705         mutex_lock(&h->busy_shutting_down);
3706         remove_from_scan_list(h);
3707         hpsa_unregister_scsi(h);        /* unhook from SCSI subsystem */
3708         hpsa_shutdown(pdev);
3709         iounmap(h->vaddr);
3710         pci_free_consistent(h->pdev,
3711                 h->nr_cmds * sizeof(struct CommandList),
3712                 h->cmd_pool, h->cmd_pool_dhandle);
3713         pci_free_consistent(h->pdev,
3714                 h->nr_cmds * sizeof(struct ErrorInfo),
3715                 h->errinfo_pool, h->errinfo_pool_dhandle);
3716         pci_free_consistent(h->pdev, h->reply_pool_size,
3717                 h->reply_pool, h->reply_pool_dhandle);
3718         kfree(h->cmd_pool_bits);
3719         kfree(h->blockFetchTable);
3720         kfree(h->hba_inquiry_data);
3721         /*
3722          * Deliberately omit pci_disable_device(): it does something nasty to
3723          * Smart Array controllers that pci_enable_device does not undo
3724          */
3725         pci_release_regions(pdev);
3726         pci_set_drvdata(pdev, NULL);
3727         mutex_unlock(&h->busy_shutting_down);
3728         kfree(h);
3729 }
3730
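/* Suspend and resume are not supported; fail the callbacks so the system
 * does not suspend with the controller in an unknown state.
 */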
3731 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
3732         __attribute__((unused)) pm_message_t state)
3733 {
3734         return -ENOSYS;
3735 }
3736
3737 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
3738 {
3739         return -ENOSYS;
3740 }
3741
3742 static struct pci_driver hpsa_pci_driver = {
3743         .name = "hpsa",
3744         .probe = hpsa_init_one,
3745         .remove = __devexit_p(hpsa_remove_one),
3746         .id_table = hpsa_pci_device_id,
3747         .shutdown = hpsa_shutdown,
3748         .suspend = hpsa_suspend,
3749         .resume = hpsa_resume,
3750 };
3751
3752 /* Fill in bucket_map[], given nsgs (the max number of
3753  * scatter gather elements supported) and bucket[],
3754  * which is an array of 8 integers.  The bucket[] array
3755  * contains 8 different DMA transfer sizes (in 16
3756  * byte increments) which the controller uses to fetch
3757  * commands.  This function fills in bucket_map[], which
3758  * maps a given number of scatter gather elements to one of
3759  * the 8 DMA transfer sizes.  The point of it is to allow the
3760  * controller to only do as much DMA as needed to fetch the
3761  * command, with the DMA transfer size encoded in the lower
3762  * bits of the command address.
3763  */
3764 static void  calc_bucket_map(int bucket[], int num_buckets,
3765         int nsgs, int *bucket_map)
3766 {
3767         int i, j, b, size;
3768
3769         /* even a command with 0 SGs requires 4 blocks */
3770 #define MINIMUM_TRANSFER_BLOCKS 4
3771 #define NUM_BUCKETS 8
3772         /* Note, bucket_map must have nsgs+1 entries. */
3773         for (i = 0; i <= nsgs; i++) {
3774                 /* Compute size of a command with i SG entries */
3775                 size = i + MINIMUM_TRANSFER_BLOCKS;
3776                 b = num_buckets - 1; /* Assume the biggest bucket */
3777                 /* Find the bucket that is just big enough */
3778                 for (j = 0; j < num_buckets; j++) {
3779                         if (bucket[j] >= size) {
3780                                 b = j;
3781                                 break;
3782                         }
3783                 }
3784                 /* for a command with i SG entries, use bucket b. */
3785                 bucket_map[i] = b;
3786         }
3787 }
3788
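/* Switch the controller from simple mode to performant mode: allocate the
 * reply ring and block-fetch table, program the transport table, ring the
 * doorbell, and on success switch h->access to the performant methods.
 */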
3789 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
3790 {
3791         u32 trans_support;
3792         u64 trans_offset;
3793         /*  5 = 1 s/g entry or 4k
3794          *  6 = 2 s/g entry or 8k
3795          *  8 = 4 s/g entry or 16k
3796          * 10 = 6 s/g entry or 24k
3797          */
3798         int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
3799         int i = 0;
3800         int l = 0;
3801         unsigned long register_value;
3802
3803         trans_support = readl(&(h->cfgtable->TransportSupport));
3804         if (!(trans_support & PERFORMANT_MODE))
3805                 return;
3806
3807         h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3808         h->max_sg_entries = 32;
3809         /* Performant mode ring buffer and supporting data structures */
3810         h->reply_pool_size = h->max_commands * sizeof(u64);
3811         h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
3812                                 &(h->reply_pool_dhandle));
3813
3814         /* Need a block fetch table for performant mode */
3815         h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
3816                                 sizeof(u32)), GFP_KERNEL);
3817
3818         if ((h->reply_pool == NULL)
3819                 || (h->blockFetchTable == NULL))
3820                 goto clean_up;
3821
3822         h->reply_pool_wraparound = 1; /* spec: init to 1 */
3823
3824         /* Controller spec: zero out this buffer. */
3825         memset(h->reply_pool, 0, h->reply_pool_size);
3826         h->reply_pool_head = h->reply_pool;
3827
3828         trans_offset = readl(&(h->cfgtable->TransMethodOffset));
3829         bft[7] = h->max_sg_entries + 4;
3830         calc_bucket_map(bft, ARRAY_SIZE(bft), h->max_sg_entries, h->blockFetchTable);
3831         for (i = 0; i < 8; i++)
3832                 writel(bft[i], &h->transtable->BlockFetch[i]);
3833
3834         /* size of controller ring buffer */
3835         writel(h->max_commands, &h->transtable->RepQSize);
3836         writel(1, &h->transtable->RepQCount);
3837         writel(0, &h->transtable->RepQCtrAddrLow32);
3838         writel(0, &h->transtable->RepQCtrAddrHigh32);
3839         writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
3840         writel(0, &h->transtable->RepQAddr0High32);
3841         writel(CFGTBL_Trans_Performant,
3842                 &(h->cfgtable->HostWrite.TransportRequest));
3843         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3844         /* under certain very rare conditions, this can take a while.
3845          * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3846          * as we enter this code.) */
3847         for (l = 0; l < MAX_CONFIG_WAIT; l++) {
3848                 register_value = readl(h->vaddr + SA5_DOORBELL);
3849                 if (!(register_value & CFGTBL_ChangeReq))
3850                         break;
3851                 /* delay and try again */
3852                 msleep(10);
3854         }
3855         register_value = readl(&(h->cfgtable->TransportActive));
3856         if (!(register_value & CFGTBL_Trans_Performant)) {
3857                 dev_warn(&h->pdev->dev, "unable to get board into"
3858                                         " performant mode\n");
3859                 return;
3860         }
3861
3862         /* Change the access methods to the performant access methods */
3863         h->access = SA5_performant_access;
3864         h->transMethod = CFGTBL_Trans_Performant;
3865
3866         return;
3867
3868 clean_up:
3869         if (h->reply_pool)
3870                 pci_free_consistent(h->pdev, h->reply_pool_size,
3871                         h->reply_pool, h->reply_pool_dhandle);
3872         kfree(h->blockFetchTable);
3873 }
3874
3875 /*
3876  *  This is it.  Register the PCI driver information for the cards we control;
3877  *  the OS will call our registered routines when it finds one of our cards.
3878  */
3879 static int __init hpsa_init(void)
3880 {
3881         int err;
3882         /* Start the scan thread */
3883         hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
3884         if (IS_ERR(hpsa_scan_thread)) {
3885                 err = PTR_ERR(hpsa_scan_thread);
3886                 return err;
3887         }
3888         err = pci_register_driver(&hpsa_pci_driver);
3889         if (err)
3890                 kthread_stop(hpsa_scan_thread);
3891         return err;
3892 }
3893
3894 static void __exit hpsa_cleanup(void)
3895 {
3896         pci_unregister_driver(&hpsa_pci_driver);
3897         kthread_stop(hpsa_scan_thread);
3898 }
3899
3900 module_init(hpsa_init);
3901 module_exit(hpsa_cleanup);