/*
 *	Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/uaccess.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override the number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now;
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_end_request(rq, error, nr_bytes))
		ret = 0;

	if (ret == 0 && dequeue)
		drive->hwif->rq = NULL;

	return ret;
}

/**
 *	ide_end_request	-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@uptodate: completion status (>0 success, 0 or negative errno on failure)
 *	@nr_sectors: number of sectors completed
 *
 *	This is our end_request wrapper function. We complete the I/O,
 *	update the random number input, and dequeue the request, which
 *	may be out of order if it was tagged.
 */
int ide_end_request(ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;	/* 1 sector == 512 bytes */
	struct request *rq = drive->hwif->rq;

	if (!nr_sectors) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
}
EXPORT_SYMBOL(ide_end_request);
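
/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * after transferring two good sectors, a driver completes them and
 * checks whether the whole request has now finished and been dequeued:
 *
 *	if (ide_end_request(drive, 1, 2) == 0)
 *		return ide_stopped;	// request fully completed
 */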

/**
 *	ide_end_dequeued_request	-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@rq: request to complete
 *	@uptodate: completion status (>0 success, 0 or negative errno on failure)
 *	@nr_sectors: number of sectors completed
 *
 *	Complete an I/O that is no longer on the request queue. This
 *	typically occurs when we pull the request and issue a REQUEST_SENSE.
 *	We must still finish the old request but we must not tamper with the
 *	queue in the meantime.
 *
 *	NOTE: This path does not handle barriers, but barriers are not
 *	supported on this path anyway.
 */
int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	BUG_ON(!blk_rq_started(rq));

	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);

static void ide_complete_task(ide_drive_t *drive, ide_task_t *task,
			      u8 stat, u8 err)
{
	struct ide_taskfile *tf = &task->tf;

	tf->error = err;
	tf->status = stat;

	drive->hwif->tp_ops->tf_read(drive, task);

	if (task->tf_flags & IDE_TFLAG_DYN)
		kfree(task);
}

/**
 *	ide_end_drive_cmd	-	end an explicit drive command
 *	@drive: IDE device the command was issued to
 *	@stat: status register contents
 *	@err: error register contents
 *
 *	Clean up after success/failure of an explicit drive command.
 *	These get thrown onto the queue so they are synchronized with
 *	real I/O operations on the drive.
 *
 *	In LBA48 mode we have to read the register set twice to get
 *	all the extra information out.
 */
void ide_end_drive_cmd(ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = (ide_task_t *)rq->special;

		if (task)
			ide_complete_task(drive, task, stat, err);
	} else if (blk_pm_request(rq)) {
		ide_complete_pm_rq(drive, rq);
		return;
	}

	hwif->rq = NULL;
	rq->errors = err;

	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
				     blk_rq_bytes(rq))))
		hwif->rq = NULL;
}
EXPORT_SYMBOL(ide_end_drive_cmd);
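
/*
 * Usage sketch (hypothetical completion handler, not from this file):
 * once the command finishes, read the status register and hand both the
 * status and error bits back so the taskfile in rq->special is updated:
 *
 *	u8 stat = hwif->tp_ops->read_status(hwif);
 *
 *	ide_end_drive_cmd(drive, stat, ide_read_error(drive));
 */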

void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		struct ide_driver *drv;

		drv = *(struct ide_driver **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->lbal    = drive->sect;
	tf->lbam    = drive->cyl;
	tf->lbah    = drive->cyl >> 8;
	tf->device  = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}
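
/*
 * Worked example (hypothetical geometry): for a drive probed with
 * drive->sect == 63 and drive->head == 16, ide_tf_set_specify_cmd()
 * builds an INIT_DEV_PARAMS taskfile with nsect/lbal == 63 (sectors
 * per track) and the low nibble of the device register == 15
 * (heads - 1), restoring the CHS translation after a reset.
 */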

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.data_phase = TASKFILE_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &args.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &args.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		ide_tf_set_setmult_cmd(drive, &args.tf);
	} else if (s->all) {
		int special = s->all;

		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n",
		       drive->name, special);
		return ide_stopped;
	}

	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
			IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &args);

	return ide_started;
}

/**
 *	do_special		-	issue some special commands
 *	@drive: drive the command is for
 *
 *	do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 *	ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 *
 *	It used to do much more, but has been scaled back.
 */
static ide_startstop_t do_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (drive->media == ide_disk)
		return ide_disk_special(drive);

	s->all = 0;
	drive->mult_req = 0;

	return ide_stopped;
}

void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	} else if (!rq->bio) {
		sg_init_one(sg, rq->data, rq->data_len);
		hwif->sg_nents = 1;
	} else
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
}
EXPORT_SYMBOL_GPL(ide_map_sg);
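
/*
 * Call-order sketch (mirrors execute_drive_cmd() below): for a data
 * phase the scatter/gather state is set up first, then the request is
 * mapped into hwif->sg_table before the transfer is started:
 *
 *	ide_init_sg_cmd(drive, rq);
 *	ide_map_sg(drive, rq);
 */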

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg_ofs = 0;
	hwif->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 *	execute_drive_cmd	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do this, as that behavior is
 *	due to change.
 */
static ide_startstop_t execute_drive_cmd(ide_drive_t *drive,
					 struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t *task = rq->special;

	if (task) {
		hwif->data_phase = task->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		return do_rw_taskfile(drive, task);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
			  ide_read_error(drive));

	return ide_stopped;
}

static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	switch (rq->cmd[0]) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}

/**
 *	start_request	-	start of I/O and command issuing for IDE
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests.
 *
 *	FIXME: this function needs a rename
 */
static ide_startstop_t start_request(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
	       drive->hwif->name, (unsigned long)rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n",
		       drive->name);
		return startstop;
	}

	if (!drive->special.all) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
			       drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}

	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 *	ide_stall_queue		-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the port by sleeping for timeout jiffies.
 */
void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);
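
/*
 * Usage sketch (hypothetical caller): a media driver that wants to
 * yield the port for half a second would issue:
 *
 *	ide_stall_queue(drive, HZ / 2);
 *
 * Timeouts above WAIT_WORSTCASE are clamped, so callers need not
 * range-check the value themselves.
 */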

static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0 && host->get_lock)
			host->get_lock(ide_intr, hwif);
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}

/*
 * Issue a new request to a device.
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;
	ide_hwif_t *hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	struct request *rq = NULL;
	ide_startstop_t startstop;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(q))
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(q);

	spin_unlock_irq(q->queue_lock);

	if (ide_lock_host(host, hwif))
		goto plug_device_2;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;
repeat:
		prev_port = hwif->host->cur_port;
		hwif->rq = NULL;

		if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
			/* still stalled: replug and retry later */
			if (time_before(jiffies, drive->sleep)) {
				ide_unlock_port(hwif);
				goto plug_device;
			}
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			/*
			 * set nIEN for previous port, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (prev_port && prev_port->cur_dev->quirk_list == 0)
				prev_port->tp_ops->set_irq(prev_port, 0);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		spin_unlock_irq(&hwif->lock);
		spin_lock_irq(q->queue_lock);
		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		spin_unlock_irq(q->queue_lock);
		spin_lock_irq(&hwif->lock);

		if (!rq) {
			ide_unlock_port(hwif);
			goto out;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped)
			goto repeat;
	} else
		goto plug_device;
out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	spin_lock_irq(q->queue_lock);
	return;

plug_device:
	spin_unlock_irq(&hwif->lock);
	ide_unlock_host(host);
plug_device_2:
	spin_lock_irq(q->queue_lock);

	if (!elv_queue_empty(q))
		blk_plug_device(q);
}

static void ide_plug_device(ide_drive_t *drive)
{
	struct request_queue *q = drive->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!elv_queue_empty(q))
		blk_plug_device(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);

	if (stat & ATA_BUSY)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}
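
/*
 * Background note: the alternate status register mirrors the status
 * register, but reading it does not acknowledge INTRQ on the device,
 * which is why drive_is_ready() prefers it whenever the control block
 * registers are available and trustworthy.
 */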

/**
 *	ide_timer_expiry	-	handle lack of an IDE interrupt
 *	@data: timer callback magic (hwif)
 *
 *	An IDE command has timed out before the expected drive return
 *	occurred. At this point we attempt to clean up the current
 *	mess. If the current handler includes an expiry handler then
 *	we invoke the expiry handler, and providing it is happy the
 *	work is done. If that fails we apply generic recovery rules
 *	invoking the handler and checking the drive DMA status. We
 *	have an excessively incestuous relationship with the DMA
 *	logic that wants cleaning up.
 */
void ide_timer_expiry(unsigned long data)
{
	ide_hwif_t *hwif = (ide_hwif_t *)data;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	int wait = -1;
	int plug_device = 0;

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) { /* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		/* disable_irq_nosync ?? */
		disable_irq(hwif->irq);
		/* local CPU only, as if we were handling an interrupt */
		local_irq_disable();
		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->ack_intr)
				hwif->ack_intr(hwif);
			printk(KERN_WARNING "%s: lost interrupt\n",
			       drive->name);
			startstop = handler(drive);
		} else {
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}
		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped) {
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}
}

/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwif: port being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt here.
 *
 *	On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *	the drive enters "idle", "standby", or "sleep" mode, so if the status
 *	looks "good", we just ignore the interrupt completely.
 *
 *	This routine assumes __cli() is in effect when called.
 *
 *	If an unexpected interrupt happens on irq15 while we are handling irq14
 *	and if the two interfaces are "serialized" (CMD640), then it looks like
 *	we could screw up by interfering with a new request being set up for
 *	irq15.
 *
 *	In reality, this is a non-issue. The new command is not sent unless
 *	the drive is ready to accept one, in which case we know the drive is
 *	not trying to interrupt us. And ide_set_handler() is always invoked
 *	before completing the issuance of any new drive command, so we will not
 *	be accidentally invoked as a result of any valid command completion
 *	event.
 */
static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		/* Try to not flood the console with msgs */
		static unsigned long last_msgtime, count;

		++count;
		if (time_after(jiffies, last_msgtime + HZ)) {
			last_msgtime = jiffies;
			printk(KERN_ERR "%s: unexpected interrupt, "
				"status=0x%02x, count=%ld\n",
				hwif->name, stat, count);
		}
	}
}

/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq: interrupt number
 *	@dev_id: hwif that raised the interrupt
 *	@regs: unused weirdness from the kernel irq layer
 *
 *	This is the default IRQ handler for the IDE layer. You should
 *	not need to override it. If you do, be aware it is subtle in
 *	places.
 *
 *	hwif is the interface in the group currently performing
 *	a command. hwif->cur_dev is the drive and hwif->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process. Unlike a smart SCSI controller, IDE
 *	expects the main processor to sequence the various transfer
 *	stages. We also manage a poll timer to catch up with most
 *	timeout situations. There are still a few where the handlers
 *	don't ever decide to give up.
 *
 *	The handler eventually returns ide_stopped to indicate the
 *	request completed. At this point we issue the next request
 *	on the port and the process begins again.
 */
irqreturn_t ide_intr(int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	struct ide_host *host = hwif->host;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (hwif->ack_intr && hwif->ack_intr(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped) {
		BUG_ON(hwif->handler);
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}

	return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);
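
/*
 * Registration sketch (assumption: condensed illustration, not part of
 * this file): the port probe code wires ide_intr() up with the hwif as
 * the dev_id cookie, along the lines of:
 *
 *	if (request_irq(hwif->irq, &ide_intr, IRQF_SHARED,
 *			hwif->name, hwif))
 *		... fail the port probe ...
 */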

void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);
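
/*
 * Example (illustrative): if the device expects a full 512-byte sector
 * but the driver only had 510 bytes of real data to send, the last two
 * bytes can be clocked out as zero padding in one call:
 *
 *	ide_pad_transfer(drive, 1, 2);	// 1 == write direction
 */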