2 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 2003 Red Hat <alan@redhat.com>
7 #include <linux/module.h>
8 #include <linux/types.h>
9 #include <linux/string.h>
10 #include <linux/kernel.h>
11 #include <linux/timer.h>
13 #include <linux/interrupt.h>
14 #include <linux/major.h>
15 #include <linux/errno.h>
16 #include <linux/genhd.h>
17 #include <linux/blkpg.h>
18 #include <linux/slab.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/hdreg.h>
22 #include <linux/ide.h>
23 #include <linux/bitops.h>
24 #include <linux/nmi.h>
26 #include <asm/byteorder.h>
28 #include <asm/uaccess.h>
32 * Conventional PIO operations for ATA devices
35 static u8 ide_inb (unsigned long port)
37 return (u8) inb(port);
40 static void ide_outb (u8 val, unsigned long port)
45 static void ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
50 void default_hwif_iops (ide_hwif_t *hwif)
52 hwif->OUTB = ide_outb;
53 hwif->OUTBSYNC = ide_outbsync;
58 * MMIO operations, typically used for SATA controllers
61 static u8 ide_mm_inb (unsigned long port)
63 return (u8) readb((void __iomem *) port);
66 static void ide_mm_outb (u8 value, unsigned long port)
68 writeb(value, (void __iomem *) port);
71 static void ide_mm_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
73 writeb(value, (void __iomem *) port);
76 void default_hwif_mmiops (ide_hwif_t *hwif)
78 hwif->OUTB = ide_mm_outb;
79 /* Most systems will need to override OUTBSYNC, as the
80 required behaviour here is controller specific! */
81 hwif->OUTBSYNC = ide_mm_outbsync;
82 hwif->INB = ide_mm_inb;
85 EXPORT_SYMBOL(default_hwif_mmiops);
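
/*
 * Hedged usage sketch (not taken from a real driver): a memory-mapped host
 * driver would typically install these accessors from its port setup code
 * before anything touches the registers.  The function name is illustrative.
 */
static void example_setup_mmio_hwif(ide_hwif_t *hwif)
{
	/* switch this interface to the readb()/writeb() based accessors */
	default_hwif_mmiops(hwif);
}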
87 void SELECT_DRIVE (ide_drive_t *drive)
89 ide_hwif_t *hwif = drive->hwif;
90 const struct ide_port_ops *port_ops = hwif->port_ops;
92 if (port_ops && port_ops->selectproc)
93 port_ops->selectproc(drive);
95 hwif->OUTB(drive->select.all, hwif->io_ports.device_addr);
98 void SELECT_MASK(ide_drive_t *drive, int mask)
100 const struct ide_port_ops *port_ops = drive->hwif->port_ops;
102 if (port_ops && port_ops->maskproc)
103 port_ops->maskproc(drive, mask);
106 static void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
108 if (hwif->host_flags & IDE_HFLAG_MMIO)
109 writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
111 outb(cmd, hwif->io_ports.command_addr);
114 static u8 ide_read_sff_dma_status(ide_hwif_t *hwif)
116 if (hwif->host_flags & IDE_HFLAG_MMIO)
117 return readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
119 return inb(hwif->dma_base + ATA_DMA_STATUS);
122 static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
124 ide_hwif_t *hwif = drive->hwif;
125 struct ide_io_ports *io_ports = &hwif->io_ports;
126 struct ide_taskfile *tf = &task->tf;
127 void (*tf_outb)(u8 addr, unsigned long port);
128 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
129 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
132 tf_outb = ide_mm_outb;
136 if (task->tf_flags & IDE_TFLAG_FLAGGED)
139 if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
140 u16 data = (tf->hob_data << 8) | tf->data;
143 writew(data, (void __iomem *)io_ports->data_addr);
145 outw(data, io_ports->data_addr);
148 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
149 tf_outb(tf->hob_feature, io_ports->feature_addr);
150 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
151 tf_outb(tf->hob_nsect, io_ports->nsect_addr);
152 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
153 tf_outb(tf->hob_lbal, io_ports->lbal_addr);
154 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
155 tf_outb(tf->hob_lbam, io_ports->lbam_addr);
156 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
157 tf_outb(tf->hob_lbah, io_ports->lbah_addr);
159 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
160 tf_outb(tf->feature, io_ports->feature_addr);
161 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
162 tf_outb(tf->nsect, io_ports->nsect_addr);
163 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
164 tf_outb(tf->lbal, io_ports->lbal_addr);
165 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
166 tf_outb(tf->lbam, io_ports->lbam_addr);
167 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
168 tf_outb(tf->lbah, io_ports->lbah_addr);
170 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
171 tf_outb((tf->device & HIHI) | drive->select.all,
172 io_ports->device_addr);
175 static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
177 ide_hwif_t *hwif = drive->hwif;
178 struct ide_io_ports *io_ports = &hwif->io_ports;
179 struct ide_taskfile *tf = &task->tf;
180 void (*tf_outb)(u8 addr, unsigned long port);
181 u8 (*tf_inb)(unsigned long port);
182 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
185 tf_outb = ide_mm_outb;
192 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
196 data = readw((void __iomem *)io_ports->data_addr);
198 data = inw(io_ports->data_addr);
200 tf->data = data & 0xff;
201 tf->hob_data = (data >> 8) & 0xff;
204 /* be sure we're looking at the low order bits */
205 tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
207 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
208 tf->nsect = tf_inb(io_ports->nsect_addr);
209 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
210 tf->lbal = tf_inb(io_ports->lbal_addr);
211 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
212 tf->lbam = tf_inb(io_ports->lbam_addr);
213 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
214 tf->lbah = tf_inb(io_ports->lbah_addr);
215 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
216 tf->device = tf_inb(io_ports->device_addr);
218 if (task->tf_flags & IDE_TFLAG_LBA48) {
219 tf_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
221 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
222 tf->hob_feature = tf_inb(io_ports->feature_addr);
223 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
224 tf->hob_nsect = tf_inb(io_ports->nsect_addr);
225 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
226 tf->hob_lbal = tf_inb(io_ports->lbal_addr);
227 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
228 tf->hob_lbam = tf_inb(io_ports->lbam_addr);
229 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
230 tf->hob_lbah = tf_inb(io_ports->lbah_addr);
235 * Some localbus EIDE interfaces require a special access sequence
236 * when using 32-bit I/O instructions to transfer data. We call this
237 * the "vlb_sync" sequence, which consists of three successive reads
238 * of the sector count register location, with interrupts disabled
239 * to ensure that the reads all happen together.
241 static void ata_vlb_sync(unsigned long port)
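{
	/*
	 * Hedged sketch of the sequence described above, assuming the plain
	 * inb() accessor: three back-to-back dummy reads of the given
	 * register with the results discarded.  The real body may differ
	 * in detail.
	 */
	(void)inb(port);
	(void)inb(port);
	(void)inb(port);
}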
249 * This is used for most PIO data transfers *from* the IDE interface
251 * These routines will round up any request for an odd number of bytes,
252 * so if an odd len is specified, be sure that there's at least one
253 * extra byte allocated for the buffer.
255 static void ata_input_data(ide_drive_t *drive, struct request *rq,
256 void *buf, unsigned int len)
258 ide_hwif_t *hwif = drive->hwif;
259 struct ide_io_ports *io_ports = &hwif->io_ports;
260 unsigned long data_addr = io_ports->data_addr;
261 u8 io_32bit = drive->io_32bit;
262 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
267 unsigned long uninitialized_var(flags);
269 if ((io_32bit & 2) && !mmio) {
270 local_irq_save(flags);
271 ata_vlb_sync(io_ports->nsect_addr);
275 __ide_mm_insl((void __iomem *)data_addr, buf, len / 4);
277 insl(data_addr, buf, len / 4);
279 if ((io_32bit & 2) && !mmio)
280 local_irq_restore(flags);
282 if ((len & 3) >= 2) {
284 __ide_mm_insw((void __iomem *)data_addr,
285 (u8 *)buf + (len & ~3), 1);
287 insw(data_addr, (u8 *)buf + (len & ~3), 1);
291 __ide_mm_insw((void __iomem *)data_addr, buf, len / 2);
293 insw(data_addr, buf, len / 2);
298 * This is used for most PIO data transfers *to* the IDE interface
300 static void ata_output_data(ide_drive_t *drive, struct request *rq,
301 void *buf, unsigned int len)
303 ide_hwif_t *hwif = drive->hwif;
304 struct ide_io_ports *io_ports = &hwif->io_ports;
305 unsigned long data_addr = io_ports->data_addr;
306 u8 io_32bit = drive->io_32bit;
307 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
310 unsigned long uninitialized_var(flags);
312 if ((io_32bit & 2) && !mmio) {
313 local_irq_save(flags);
314 ata_vlb_sync(io_ports->nsect_addr);
318 __ide_mm_outsl((void __iomem *)data_addr, buf, len / 4);
320 outsl(data_addr, buf, len / 4);
322 if ((io_32bit & 2) && !mmio)
323 local_irq_restore(flags);
325 if ((len & 3) >= 2) {
327 __ide_mm_outsw((void __iomem *)data_addr,
328 (u8 *)buf + (len & ~3), 1);
330 outsw(data_addr, (u8 *)buf + (len & ~3), 1);
334 __ide_mm_outsw((void __iomem *)data_addr, buf, len / 2);
336 outsw(data_addr, buf, len / 2);
340 void default_hwif_transport(ide_hwif_t *hwif)
342 hwif->exec_command = ide_exec_command;
343 hwif->read_sff_dma_status = ide_read_sff_dma_status;
345 hwif->tf_load = ide_tf_load;
346 hwif->tf_read = ide_tf_read;
348 hwif->input_data = ata_input_data;
349 hwif->output_data = ata_output_data;
352 void ide_fix_driveid (struct hd_driveid *id)
354 #ifndef __LITTLE_ENDIAN
359 id->config = __le16_to_cpu(id->config);
360 id->cyls = __le16_to_cpu(id->cyls);
361 id->reserved2 = __le16_to_cpu(id->reserved2);
362 id->heads = __le16_to_cpu(id->heads);
363 id->track_bytes = __le16_to_cpu(id->track_bytes);
364 id->sector_bytes = __le16_to_cpu(id->sector_bytes);
365 id->sectors = __le16_to_cpu(id->sectors);
366 id->vendor0 = __le16_to_cpu(id->vendor0);
367 id->vendor1 = __le16_to_cpu(id->vendor1);
368 id->vendor2 = __le16_to_cpu(id->vendor2);
369 stringcast = (u16 *)&id->serial_no[0];
370 for (i = 0; i < (20/2); i++)
371 stringcast[i] = __le16_to_cpu(stringcast[i]);
372 id->buf_type = __le16_to_cpu(id->buf_type);
373 id->buf_size = __le16_to_cpu(id->buf_size);
374 id->ecc_bytes = __le16_to_cpu(id->ecc_bytes);
375 stringcast = (u16 *)&id->fw_rev[0];
376 for (i = 0; i < (8/2); i++)
377 stringcast[i] = __le16_to_cpu(stringcast[i]);
378 stringcast = (u16 *)&id->model[0];
379 for (i = 0; i < (40/2); i++)
380 stringcast[i] = __le16_to_cpu(stringcast[i]);
381 id->dword_io = __le16_to_cpu(id->dword_io);
382 id->reserved50 = __le16_to_cpu(id->reserved50);
383 id->field_valid = __le16_to_cpu(id->field_valid);
384 id->cur_cyls = __le16_to_cpu(id->cur_cyls);
385 id->cur_heads = __le16_to_cpu(id->cur_heads);
386 id->cur_sectors = __le16_to_cpu(id->cur_sectors);
387 id->cur_capacity0 = __le16_to_cpu(id->cur_capacity0);
388 id->cur_capacity1 = __le16_to_cpu(id->cur_capacity1);
389 id->lba_capacity = __le32_to_cpu(id->lba_capacity);
390 id->dma_1word = __le16_to_cpu(id->dma_1word);
391 id->dma_mword = __le16_to_cpu(id->dma_mword);
392 id->eide_pio_modes = __le16_to_cpu(id->eide_pio_modes);
393 id->eide_dma_min = __le16_to_cpu(id->eide_dma_min);
394 id->eide_dma_time = __le16_to_cpu(id->eide_dma_time);
395 id->eide_pio = __le16_to_cpu(id->eide_pio);
396 id->eide_pio_iordy = __le16_to_cpu(id->eide_pio_iordy);
397 for (i = 0; i < 2; ++i)
398 id->words69_70[i] = __le16_to_cpu(id->words69_70[i]);
399 for (i = 0; i < 4; ++i)
400 id->words71_74[i] = __le16_to_cpu(id->words71_74[i]);
401 id->queue_depth = __le16_to_cpu(id->queue_depth);
402 for (i = 0; i < 4; ++i)
403 id->words76_79[i] = __le16_to_cpu(id->words76_79[i]);
404 id->major_rev_num = __le16_to_cpu(id->major_rev_num);
405 id->minor_rev_num = __le16_to_cpu(id->minor_rev_num);
406 id->command_set_1 = __le16_to_cpu(id->command_set_1);
407 id->command_set_2 = __le16_to_cpu(id->command_set_2);
408 id->cfsse = __le16_to_cpu(id->cfsse);
409 id->cfs_enable_1 = __le16_to_cpu(id->cfs_enable_1);
410 id->cfs_enable_2 = __le16_to_cpu(id->cfs_enable_2);
411 id->csf_default = __le16_to_cpu(id->csf_default);
412 id->dma_ultra = __le16_to_cpu(id->dma_ultra);
413 id->trseuc = __le16_to_cpu(id->trseuc);
414 id->trsEuc = __le16_to_cpu(id->trsEuc);
415 id->CurAPMvalues = __le16_to_cpu(id->CurAPMvalues);
416 id->mprc = __le16_to_cpu(id->mprc);
417 id->hw_config = __le16_to_cpu(id->hw_config);
418 id->acoustic = __le16_to_cpu(id->acoustic);
419 id->msrqs = __le16_to_cpu(id->msrqs);
420 id->sxfert = __le16_to_cpu(id->sxfert);
421 id->sal = __le16_to_cpu(id->sal);
422 id->spg = __le32_to_cpu(id->spg);
423 id->lba_capacity_2 = __le64_to_cpu(id->lba_capacity_2);
424 for (i = 0; i < 22; i++)
425 id->words104_125[i] = __le16_to_cpu(id->words104_125[i]);
426 id->last_lun = __le16_to_cpu(id->last_lun);
427 id->word127 = __le16_to_cpu(id->word127);
428 id->dlf = __le16_to_cpu(id->dlf);
429 id->csfo = __le16_to_cpu(id->csfo);
430 for (i = 0; i < 26; i++)
431 id->words130_155[i] = __le16_to_cpu(id->words130_155[i]);
432 id->word156 = __le16_to_cpu(id->word156);
433 for (i = 0; i < 3; i++)
434 id->words157_159[i] = __le16_to_cpu(id->words157_159[i]);
435 id->cfa_power = __le16_to_cpu(id->cfa_power);
436 for (i = 0; i < 14; i++)
437 id->words161_175[i] = __le16_to_cpu(id->words161_175[i]);
438 for (i = 0; i < 31; i++)
439 id->words176_205[i] = __le16_to_cpu(id->words176_205[i]);
440 for (i = 0; i < 48; i++)
441 id->words206_254[i] = __le16_to_cpu(id->words206_254[i]);
442 id->integrity_word = __le16_to_cpu(id->integrity_word);
444 # error "Please fix <asm/byteorder.h>"
450 * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
451 * removing leading/trailing blanks and compressing internal blanks.
452 * It is primarily used to tidy up the model name/number fields as
453 * returned by the WIN_[P]IDENTIFY commands.
456 void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
458 u8 *p = s, *end = &s[bytecount & ~1]; /* bytecount must be even */
461 /* convert from big-endian to host byte order */
462 for (p = end ; p != s;) {
463 unsigned short *pp = (unsigned short *) (p -= 2);
467 /* strip leading blanks */
468 while (s != end && *s == ' ')
470 /* compress internal blanks and strip trailing blanks */
471 while (s != end && *s) {
472 if (*s++ != ' ' || (s != end && *s && *s != ' '))
475 /* wipe out trailing garbage */
480 EXPORT_SYMBOL(ide_fixstring);
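
/*
 * Hedged usage sketch: probe code typically tidies up the three IDENTIFY
 * text fields right after reading the id page, byte-swapping when the words
 * were read in little-endian order.  Field names follow <linux/hdreg.h>;
 * the wrapper function itself is illustrative.
 */
static void example_tidy_id_strings(struct hd_driveid *id, int bswap)
{
	ide_fixstring(id->model,     sizeof(id->model),     bswap);	/* 40 bytes */
	ide_fixstring(id->fw_rev,    sizeof(id->fw_rev),    bswap);	/*  8 bytes */
	ide_fixstring(id->serial_no, sizeof(id->serial_no), bswap);	/* 20 bytes */
}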
483 * Needed for PCI irq sharing
485 int drive_is_ready (ide_drive_t *drive)
487 ide_hwif_t *hwif = HWIF(drive);
490 if (drive->waiting_for_dma)
491 return hwif->dma_ops->dma_test_irq(drive);
494 /* need to guarantee 400ns since last command was issued */
499 * We do a passive status test under shared PCI interrupts on
500 * cards that truly share the ATA side interrupt, but may also share
501 * an interrupt with another pci card/device. We make no assumptions
502 * about possible isa-pnp and pci-pnp issues yet.
504 if (hwif->io_ports.ctl_addr)
505 stat = ide_read_altstatus(drive);
507 /* Note: this may clear a pending IRQ!! */
508 stat = ide_read_status(drive);
510 if (stat & BUSY_STAT)
511 /* drive busy: definitely not interrupting */
514 /* drive ready: *might* be interrupting */
518 EXPORT_SYMBOL(drive_is_ready);
521 * This routine busy-waits for the drive status to be not "busy".
522 * It then checks the status for all of the "good" bits and none
523 * of the "bad" bits, and if all is okay it returns 0. All other
524 * cases return error -- caller may then invoke ide_error().
526 * This routine should be fixed to not hog the CPU during extra long waits:
527 * busy-wait for the first jiffy or two, and then set a timer to wake up
528 * at half-second intervals thereafter, until the timeout expires.
531 static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
537 udelay(1); /* spec allows drive 400ns to assert "BUSY" */
538 stat = ide_read_status(drive);
540 if (stat & BUSY_STAT) {
541 local_irq_set(flags);
543 while ((stat = ide_read_status(drive)) & BUSY_STAT) {
544 if (time_after(jiffies, timeout)) {
546 * One last read after the timeout in case
547 * heavy interrupt load prevented us from making
548 * any progress during the timeout.
550 stat = ide_read_status(drive);
551 if (!(stat & BUSY_STAT))
554 local_irq_restore(flags);
559 local_irq_restore(flags);
562 * Allow status to settle, then read it again.
563 * A few rare drives vastly violate the 400ns spec here,
564 * so we'll wait up to 10usec for a "good" status
565 * rather than expensively fail things immediately.
566 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
568 for (i = 0; i < 10; i++) {
570 stat = ide_read_status(drive);
572 if (OK_STAT(stat, good, bad)) {
582 * On error this returns a non-zero value after doing "*startstop = ide_error()";
583 * the caller should then return the updated value of "startstop".
584 * "startstop" is unchanged when the function returns 0.
586 int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
591 /* bail early if we've exceeded max_failures */
592 if (drive->max_failures && (drive->failures > drive->max_failures)) {
593 *startstop = ide_stopped;
597 err = __ide_wait_stat(drive, good, bad, timeout, &stat);
600 char *s = (err == -EBUSY) ? "status timeout" : "status error";
601 *startstop = ide_error(drive, s, stat);
607 EXPORT_SYMBOL(ide_wait_stat);
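
/*
 * Hedged usage sketch: a caller typically waits for DRDY after selecting the
 * drive and propagates the startstop value that ide_error() filled in on
 * failure.  The wrapper name and the exact good/bad masks are illustrative.
 */
static ide_startstop_t example_wait_ready(ide_drive_t *drive)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  BUSY_STAT | DRQ_STAT | ERR_STAT, WAIT_READY))
		return startstop;	/* ide_error() has already been invoked */

	/* ... drive is ready: program the taskfile and issue the command ... */
	return ide_started;
}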
610 * ide_in_drive_list - look for drive in black/white list
611 * @id: drive identifier
612 * @drive_table: list to inspect
614 * Look for a drive in the blacklist and the whitelist tables
615 * Returns 1 if the drive is found in the table.
618 int ide_in_drive_list(struct hd_driveid *id, const struct drive_list_entry *drive_table)
620 for ( ; drive_table->id_model; drive_table++)
621 if ((!strcmp(drive_table->id_model, id->model)) &&
622 (!drive_table->id_firmware ||
623 strstr(id->fw_rev, drive_table->id_firmware)))
628 EXPORT_SYMBOL_GPL(ide_in_drive_list);
631 * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
632 * We list them here and depend on the device side cable detection for them.
634 * Some optical devices with buggy firmware have the same problem.
636 static const struct drive_list_entry ivb_list[] = {
637 { "QUANTUM FIREBALLlct10 05" , "A03.0900" },
638 { "TSSTcorp CDDVDW SH-S202J" , "SB00" },
639 { "TSSTcorp CDDVDW SH-S202J" , "SB01" },
640 { "TSSTcorp CDDVDW SH-S202N" , "SB00" },
641 { "TSSTcorp CDDVDW SH-S202N" , "SB01" },
642 { "TSSTcorp CDDVDW SH-S202H" , "SB00" },
643 { "TSSTcorp CDDVDW SH-S202H" , "SB01" },
648 * All hosts that use the 80c ribbon must use this check!
649 * The name is derived from the upper byte of word 93 and the 80c ribbon.
651 u8 eighty_ninty_three (ide_drive_t *drive)
653 ide_hwif_t *hwif = drive->hwif;
654 struct hd_driveid *id = drive->id;
655 int ivb = ide_in_drive_list(id, ivb_list);
657 if (hwif->cbl == ATA_CBL_PATA40_SHORT)
661 printk(KERN_DEBUG "%s: skipping word 93 validity check\n",
664 if (ide_dev_is_sata(id) && !ivb)
667 if (hwif->cbl != ATA_CBL_PATA80 && !ivb)
672 * - change master/slave IDENTIFY order
673 * - force bit13 (80c cable present) check also for !ivb devices
674 * (unless the slave device is pre-ATA3)
676 if ((id->hw_config & 0x4000) || (ivb && (id->hw_config & 0x2000)))
680 if (drive->udma33_warned == 1)
683 printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
684 "limiting max speed to UDMA33\n",
686 hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");
688 drive->udma33_warned = 1;
693 int ide_driveid_update(ide_drive_t *drive)
695 ide_hwif_t *hwif = drive->hwif;
696 struct hd_driveid *id;
697 unsigned long timeout, flags;
701 * Re-read drive->id for possible DMA mode
702 * change (copied from ide-probe.c)
705 SELECT_MASK(drive, 1);
706 ide_set_irq(drive, 0);
708 hwif->exec_command(hwif, WIN_IDENTIFY);
709 timeout = jiffies + WAIT_WORSTCASE;
711 if (time_after(jiffies, timeout)) {
712 SELECT_MASK(drive, 0);
713 return 0; /* drive timed-out */
716 msleep(50); /* give drive a breather */
717 stat = ide_read_altstatus(drive);
718 } while (stat & BUSY_STAT);
720 msleep(50); /* wait for IRQ and DRQ_STAT */
721 stat = ide_read_status(drive);
723 if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
724 SELECT_MASK(drive, 0);
725 printk("%s: CHECK for good STATUS\n", drive->name);
728 local_irq_save(flags);
729 SELECT_MASK(drive, 0);
730 id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
732 local_irq_restore(flags);
735 hwif->input_data(drive, NULL, id, SECTOR_SIZE);
736 (void)ide_read_status(drive); /* clear drive IRQ */
738 local_irq_restore(flags);
741 drive->id->dma_ultra = id->dma_ultra;
742 drive->id->dma_mword = id->dma_mword;
743 drive->id->dma_1word = id->dma_1word;
744 /* anything more ? */
747 if (drive->using_dma && ide_id_dma_bug(drive))
754 int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
756 ide_hwif_t *hwif = drive->hwif;
757 struct ide_io_ports *io_ports = &hwif->io_ports;
761 #ifdef CONFIG_BLK_DEV_IDEDMA
762 if (hwif->dma_ops) /* check if host supports DMA */
763 hwif->dma_ops->dma_host_set(drive, 0);
766 /* Skip setting PIO flow-control modes on pre-EIDE drives */
767 if ((speed & 0xf8) == XFER_PIO_0 && !(drive->id->capability & 0x08))
771 * Don't use ide_wait_cmd here - it will
772 * attempt to set_geometry and recalibrate,
773 * but for some reason these don't work at
774 * this point (lost interrupt).
777 * Select the drive, and issue the SETFEATURES command
779 disable_irq_nosync(hwif->irq);
782 * FIXME: we race against the running IRQ here if
783 * this is called from non IRQ context. If we use
784 * disable_irq() we hang on the error path. Work is needed.
790 SELECT_MASK(drive, 0);
792 ide_set_irq(drive, 0);
793 hwif->OUTB(speed, io_ports->nsect_addr);
794 hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr);
795 hwif->exec_command(hwif, WIN_SETFEATURES);
796 if (drive->quirk_list == 2)
797 ide_set_irq(drive, 1);
799 error = __ide_wait_stat(drive, drive->ready_stat,
800 BUSY_STAT|DRQ_STAT|ERR_STAT,
803 SELECT_MASK(drive, 0);
805 enable_irq(hwif->irq);
808 (void) ide_dump_status(drive, "set_drive_speed_status", stat);
812 drive->id->dma_ultra &= ~0xFF00;
813 drive->id->dma_mword &= ~0x0F00;
814 drive->id->dma_1word &= ~0x0F00;
817 #ifdef CONFIG_BLK_DEV_IDEDMA
818 if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
820 hwif->dma_ops->dma_host_set(drive, 1);
821 else if (hwif->dma_ops) /* check if host supports DMA */
822 ide_dma_off_quietly(drive);
826 case XFER_UDMA_7: drive->id->dma_ultra |= 0x8080; break;
827 case XFER_UDMA_6: drive->id->dma_ultra |= 0x4040; break;
828 case XFER_UDMA_5: drive->id->dma_ultra |= 0x2020; break;
829 case XFER_UDMA_4: drive->id->dma_ultra |= 0x1010; break;
830 case XFER_UDMA_3: drive->id->dma_ultra |= 0x0808; break;
831 case XFER_UDMA_2: drive->id->dma_ultra |= 0x0404; break;
832 case XFER_UDMA_1: drive->id->dma_ultra |= 0x0202; break;
833 case XFER_UDMA_0: drive->id->dma_ultra |= 0x0101; break;
834 case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
835 case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
836 case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
837 case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
838 case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
839 case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
842 if (!drive->init_speed)
843 drive->init_speed = speed;
844 drive->current_speed = speed;
849 * This should get invoked any time we exit the driver to
850 * wait for an interrupt response from a drive. handler() points
851 * at the appropriate code to handle the next interrupt, and a
852 * timer is started to prevent us from waiting forever in case
853 * something goes wrong (see the ide_timer_expiry() handler later on).
855 * See also ide_execute_command
857 static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
858 unsigned int timeout, ide_expiry_t *expiry)
860 ide_hwgroup_t *hwgroup = HWGROUP(drive);
862 BUG_ON(hwgroup->handler);
863 hwgroup->handler = handler;
864 hwgroup->expiry = expiry;
865 hwgroup->timer.expires = jiffies + timeout;
866 hwgroup->req_gen_timer = hwgroup->req_gen;
867 add_timer(&hwgroup->timer);
870 void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
871 unsigned int timeout, ide_expiry_t *expiry)
874 spin_lock_irqsave(&ide_lock, flags);
875 __ide_set_handler(drive, handler, timeout, expiry);
876 spin_unlock_irqrestore(&ide_lock, flags);
879 EXPORT_SYMBOL(ide_set_handler);
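
/*
 * Hedged usage sketch: a PIO interrupt handler typically re-arms itself for
 * the next data phase before returning.  The handler body and the timeout
 * choice below are illustrative only.
 */
static ide_startstop_t example_pio_intr(ide_drive_t *drive)
{
	/* ... transfer the current chunk of data here ... */

	/* expect another interrupt for the next chunk */
	ide_set_handler(drive, &example_pio_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}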
882 * ide_execute_command - execute an IDE command
883 * @drive: IDE drive to issue the command against
884 * @command: command byte to write
885 * @handler: handler for next phase
886 * @timeout: timeout for command
887 * @expiry: handler to run on timeout
889 * Helper function to issue an IDE command. This handles the
890 * atomicity requirements, command timing and ensures that the
891 * handler and IRQ setup do not race. All IDE command kick-offs
892 * should go via this function or use equivalent locking.
895 void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
896 unsigned timeout, ide_expiry_t *expiry)
899 ide_hwif_t *hwif = HWIF(drive);
901 spin_lock_irqsave(&ide_lock, flags);
902 __ide_set_handler(drive, handler, timeout, expiry);
903 hwif->exec_command(hwif, cmd);
905 * Drive takes 400ns to respond; we must avoid the IRQ being
906 * serviced before that.
908 * FIXME: we could skip this delay with care on non shared devices
911 spin_unlock_irqrestore(&ide_lock, flags);
913 EXPORT_SYMBOL(ide_execute_command);
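
/*
 * Hedged usage sketch: issuing a no-data command through ide_execute_command()
 * so that installing the completion handler, writing the command register and
 * the 400ns settle all happen under one lock.  The names and status checks
 * below are illustrative, not taken from a real driver.
 */
static ide_startstop_t example_nodata_intr(ide_drive_t *drive)
{
	u8 stat = ide_read_status(drive);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "example_nodata_intr", stat);

	return ide_stopped;
}

static void example_issue_nodata(ide_drive_t *drive, u8 cmd)
{
	ide_execute_command(drive, cmd, &example_nodata_intr,
			    WAIT_WORSTCASE, NULL);
}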
915 void ide_execute_pkt_cmd(ide_drive_t *drive)
917 ide_hwif_t *hwif = drive->hwif;
920 spin_lock_irqsave(&ide_lock, flags);
921 hwif->exec_command(hwif, WIN_PACKETCMD);
923 spin_unlock_irqrestore(&ide_lock, flags);
925 EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
927 static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
929 struct request *rq = drive->hwif->hwgroup->rq;
931 if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET)
932 ide_end_request(drive, err ? err : 1, 0);
936 static ide_startstop_t do_reset1 (ide_drive_t *, int);
939 * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
940 * during an atapi drive reset operation. If the drive has not yet responded,
941 * and we have not yet hit our maximum waiting time, then the timer is restarted for another 50ms.
944 static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
946 ide_hwgroup_t *hwgroup = HWGROUP(drive);
951 stat = ide_read_status(drive);
953 if (OK_STAT(stat, 0, BUSY_STAT))
954 printk("%s: ATAPI reset complete\n", drive->name);
956 if (time_before(jiffies, hwgroup->poll_timeout)) {
957 ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
958 /* continue polling */
962 hwgroup->polling = 0;
963 printk("%s: ATAPI reset timed-out, status=0x%02x\n",
965 /* do it the old fashioned way */
966 return do_reset1(drive, 1);
969 hwgroup->polling = 0;
970 ide_complete_drive_reset(drive, 0);
975 * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
976 * during an ide reset operation. If the drives have not yet responded,
977 * and we have not yet hit our maximum waiting time, then the timer is restarted for another 50ms.
980 static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
982 ide_hwgroup_t *hwgroup = HWGROUP(drive);
983 ide_hwif_t *hwif = HWIF(drive);
984 const struct ide_port_ops *port_ops = hwif->port_ops;
988 if (port_ops && port_ops->reset_poll) {
989 err = port_ops->reset_poll(drive);
991 printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
992 hwif->name, drive->name);
997 tmp = ide_read_status(drive);
999 if (!OK_STAT(tmp, 0, BUSY_STAT)) {
1000 if (time_before(jiffies, hwgroup->poll_timeout)) {
1001 ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
1002 /* continue polling */
1005 printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
1009 printk("%s: reset: ", hwif->name);
1010 tmp = ide_read_error(drive);
1013 printk("success\n");
1014 drive->failures = 0;
1018 switch (tmp & 0x7f) {
1019 case 1: printk("passed");
1021 case 2: printk("formatter device error");
1023 case 3: printk("sector buffer error");
1025 case 4: printk("ECC circuitry error");
1027 case 5: printk("controlling MPU error");
1029 default:printk("error (0x%02x?)", tmp);
1032 printk("; slave: failed");
1038 hwgroup->polling = 0; /* done polling */
1039 ide_complete_drive_reset(drive, err);
1043 static void ide_disk_pre_reset(ide_drive_t *drive)
1045 int legacy = (drive->id->cfs_enable_2 & 0x0400) ? 0 : 1;
1047 drive->special.all = 0;
1048 drive->special.b.set_geometry = legacy;
1049 drive->special.b.recalibrate = legacy;
1050 drive->mult_count = 0;
1051 if (!drive->keep_settings && !drive->using_dma)
1052 drive->mult_req = 0;
1053 if (drive->mult_req != drive->mult_count)
1054 drive->special.b.set_multmode = 1;
1057 static void pre_reset(ide_drive_t *drive)
1059 const struct ide_port_ops *port_ops = drive->hwif->port_ops;
1061 if (drive->media == ide_disk)
1062 ide_disk_pre_reset(drive);
1064 drive->post_reset = 1;
1066 if (drive->using_dma) {
1067 if (drive->crc_count)
1068 ide_check_dma_crc(drive);
1073 if (!drive->keep_settings) {
1074 if (!drive->using_dma) {
1076 drive->io_32bit = 0;
1081 if (port_ops && port_ops->pre_reset)
1082 port_ops->pre_reset(drive);
1084 if (drive->current_speed != 0xff)
1085 drive->desired_speed = drive->current_speed;
1086 drive->current_speed = 0xff;
1090 * do_reset1() attempts to recover a confused drive by resetting it.
1091 * Unfortunately, resetting a disk drive actually resets all devices on
1092 * the same interface, so it can really be thought of as resetting the
1093 * interface rather than resetting the drive.
1095 * ATAPI devices have their own reset mechanism which allows them to be
1096 * individually reset without clobbering other devices on the same interface.
1098 * Unfortunately, the IDE interface does not generate an interrupt to let
1099 * us know when the reset operation has finished, so we must poll for this.
1100 * Equally poor, though, is the fact that this may take a very long time
1101 * to complete (up to 30 seconds worst case). So, instead of busy-waiting
1102 * here for it to complete, we set a timer to poll at 50ms intervals.
1104 static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1107 unsigned long flags;
1109 ide_hwgroup_t *hwgroup;
1110 struct ide_io_ports *io_ports;
1111 const struct ide_port_ops *port_ops;
1114 spin_lock_irqsave(&ide_lock, flags);
1116 hwgroup = HWGROUP(drive);
1118 io_ports = &hwif->io_ports;
1120 /* We must not reset with running handlers */
1121 BUG_ON(hwgroup->handler != NULL);
1123 /* For an ATAPI device, first try an ATAPI SRST. */
1124 if (drive->media != ide_disk && !do_not_try_atapi) {
1126 SELECT_DRIVE(drive);
1128 hwif->exec_command(hwif, WIN_SRST);
1130 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1131 hwgroup->polling = 1;
1132 __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
1133 spin_unlock_irqrestore(&ide_lock, flags);
1138 * First, reset any device state data we were maintaining
1139 * for any of the drives on this interface.
1141 for (unit = 0; unit < MAX_DRIVES; ++unit)
1142 pre_reset(&hwif->drives[unit]);
1144 if (io_ports->ctl_addr == 0) {
1145 spin_unlock_irqrestore(&ide_lock, flags);
1146 ide_complete_drive_reset(drive, -ENXIO);
1151 * Note that we also set nIEN while resetting the device,
1152 * to mask unwanted interrupts from the interface during the reset.
1153 * However, due to the design of PC hardware, this will cause an
1154 * immediate interrupt due to the edge transition it produces.
1155 * This single interrupt gives us a "fast poll" for drives that
1156 * recover from reset very quickly, saving us the first 50ms wait time.
1158 /* set SRST and nIEN */
1159 hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | 6, io_ports->ctl_addr);
1160 /* more than enough time */
1162 if (drive->quirk_list == 2)
1163 ctl = ATA_DEVCTL_OBS; /* clear SRST and nIEN */
1165 ctl = ATA_DEVCTL_OBS | 2; /* clear SRST, leave nIEN */
1166 hwif->OUTBSYNC(hwif, ctl, io_ports->ctl_addr);
1167 /* more than enough time */
1169 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1170 hwgroup->polling = 1;
1171 __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
1174 * Some weird controllers like resetting themselves to a strange
1175 * state when the disks are reset this way. At least, the Winbond
1176 * 553 documentation says so.
1178 port_ops = hwif->port_ops;
1179 if (port_ops && port_ops->resetproc)
1180 port_ops->resetproc(drive);
1182 spin_unlock_irqrestore(&ide_lock, flags);
1187 * ide_do_reset() is the entry point to the drive/interface reset code.
1190 ide_startstop_t ide_do_reset (ide_drive_t *drive)
1192 return do_reset1(drive, 0);
1195 EXPORT_SYMBOL(ide_do_reset);
1198 * ide_wait_not_busy() waits for the currently selected device on the hwif
1199 * to report a non-busy status, see comments in ide_probe_port().
1201 int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1207 * Turn this into a schedule() sleep once I'm sure
1208 * about locking issues (2.5 work ?).
1211 stat = hwif->INB(hwif->io_ports.status_addr);
1212 if ((stat & BUSY_STAT) == 0)
1215 * Assume a value of 0xff means nothing is connected to
1216 * the interface and it doesn't implement the pull-down resistor on D7.
1221 touch_softlockup_watchdog();
1222 touch_nmi_watchdog();
1227 EXPORT_SYMBOL_GPL(ide_wait_not_busy);
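
/*
 * Hedged usage sketch: port probing typically gives a freshly powered-up
 * device a very generous window (tens of seconds) to drop BSY before the
 * interface is poked any further.  The timeout value here is illustrative.
 */
static int example_wait_for_port(ide_hwif_t *hwif)
{
	return ide_wait_not_busy(hwif, 35000);	/* up to ~35 seconds */
}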