Merge branch 'for-2.6.31' of git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6
[safe/jmp/linux-2.6] / drivers / ide / ide-io.c
index 7007c48..41d8040 100644 (file)
@@ -40,7 +40,6 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/ide.h>
-#include <linux/hdreg.h>
 #include <linux/completion.h>
 #include <linux/reboot.h>
 #include <linux/cdrom.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-static int __ide_end_request(ide_drive_t *drive, struct request *rq,
-                            int uptodate, unsigned int nr_bytes, int dequeue)
+int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
+              unsigned int nr_bytes)
 {
-       int ret = 1;
-       int error = 0;
-
-       if (uptodate <= 0)
-               error = uptodate ? uptodate : -EIO;
-
-       /*
-        * if failfast is set on a request, override number of sectors and
-        * complete the whole request right now
-        */
-       if (blk_noretry_request(rq) && error)
-               nr_bytes = rq->hard_nr_sectors << 9;
-
-       if (!blk_fs_request(rq) && error && !rq->errors)
-               rq->errors = -EIO;
-
        /*
         * decide whether to reenable DMA -- 3 is a random magic for now,
         * if we DMA timeout more than 3 times, just stay in PIO
@@ -84,127 +67,99 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
                ide_dma_on(drive);
        }
 
-       if (!blk_end_request(rq, error, nr_bytes))
-               ret = 0;
+       return blk_end_request(rq, error, nr_bytes);
+}
+EXPORT_SYMBOL_GPL(ide_end_rq);
 
-       if (ret == 0 && dequeue)
-               drive->hwif->rq = NULL;
+/*
+ * ide_complete_cmd - capture the result of a finished taskfile command
+ * @drive: device the command was issued to
+ * @cmd: command whose result registers should be recorded
+ * @stat: value read from the status register
+ * @err: value read from the error register
+ *
+ * Stores @stat/@err in the command's taskfile, reads back the taskfile
+ * registers, handles the custom ATA_CMD_IDLEIMMEDIATE (head unload/park)
+ * result, and for REQ_TYPE_ATA_TASKFILE requests copies the result into
+ * the ide_cmd attached to the request (or frees it if it was dynamically
+ * allocated, IDE_TFLAG_DYN).
+ */
+void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
+{
+       const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
+       struct ide_taskfile *tf = &cmd->tf;
+       struct request *rq = cmd->rq;
+       u8 tf_cmd = tf->command;
 
-       return ret;
-}
+       tf->error = err;
+       tf->status = stat;
 
-/**
- *     ide_end_request         -       complete an IDE I/O
- *     @drive: IDE device for the I/O
- *     @uptodate:
- *     @nr_sectors: number of sectors completed
- *
- *     This is our end_request wrapper function. We complete the I/O
- *     update random number input and dequeue the request, which if
- *     it was tagged may be out of order.
- */
+       /* protocol supplies an extra data byte pair -- read it in */
+       if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
+               u8 data[2];
 
-int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
-{
-       unsigned int nr_bytes = nr_sectors << 9;
-       struct request *rq = drive->hwif->rq;
+               tp_ops->input_data(drive, cmd, data, 2);
 
-       if (!nr_bytes) {
-               if (blk_pc_request(rq))
-                       nr_bytes = rq->data_len;
-               else
-                       nr_bytes = rq->hard_cur_sectors << 9;
+               cmd->tf.data  = data[0];
+               cmd->hob.data = data[1];
        }
 
-       return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
-}
-EXPORT_SYMBOL(ide_end_request);
+       ide_tf_readback(drive, cmd);
 
-/**
- *     ide_end_dequeued_request        -       complete an IDE I/O
- *     @drive: IDE device for the I/O
- *     @uptodate:
- *     @nr_sectors: number of sectors completed
- *
- *     Complete an I/O that is no longer on the request queue. This
- *     typically occurs when we pull the request and issue a REQUEST_SENSE.
- *     We must still finish the old request but we must not tamper with the
- *     queue in the meantime.
- *
- *     NOTE: This path does not handle barrier, but barrier is not supported
- *     on ide-cd anyway.
- */
+       /*
+        * Head-unload (park) is issued as IDLEIMMEDIATE with a custom
+        * handler; any lbal value other than 0xc4 is treated as a failed
+        * unload, otherwise the device is marked parked.
+        */
+       if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
+           tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
+               if (tf->lbal != 0xc4) {
+                       printk(KERN_ERR "%s: head unload failed!\n",
+                              drive->name);
+                       ide_tf_dump(drive->name, cmd);
+               } else
+                       drive->dev_flags |= IDE_DFLAG_PARKED;
+       }
 
-int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
-                            int uptodate, int nr_sectors)
-{
-       BUG_ON(!blk_rq_started(rq));
+       /* hand the result back to the taskfile request's own ide_cmd */
+       if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+               struct ide_cmd *orig_cmd = rq->special;
 
-       return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
+               if (cmd->tf_flags & IDE_TFLAG_DYN)
+                       kfree(orig_cmd);
+               else
+                       memcpy(orig_cmd, cmd, sizeof(*cmd));
+       }
 }
-EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
 
-/**
- *     ide_end_drive_cmd       -       end an explicit drive command
- *     @drive: command 
- *     @stat: status bits
- *     @err: error bits
- *
- *     Clean up after success/failure of an explicit drive command.
- *     These get thrown onto the queue so they are synchronized with
- *     real I/O operations on the drive.
- *
- *     In LBA48 mode we have to read the register set twice to get
- *     all the extra information out.
- */
-void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
+/*
+ * ide_rq_bytes - byte count for the current chunk of @rq
+ * @rq: request to size
+ *
+ * Packet-command (ATAPI) requests carry their payload length in
+ * rq->data_len; everything else is derived from the current sector
+ * count (<< 9 converts 512-byte sectors to bytes).
+ *
+ * obsolete, blk_rq_bytes() should be used instead
+ */
+unsigned int ide_rq_bytes(struct request *rq)
+{
+       if (blk_pc_request(rq))
+               return rq->data_len;
+       else
+               return rq->hard_cur_sectors << 9;
+}
+EXPORT_SYMBOL_GPL(ide_rq_bytes);
+
+/*
+ * ide_complete_rq - complete (part of) the port's current request
+ * @drive: device the request was issued to
+ * @error: completion code passed through to ide_end_rq()
+ * @nr_bytes: number of bytes to complete
+ *
+ * Wraps ide_end_rq(); when the request has been fully completed
+ * (ide_end_rq() returned 0) the port's current-request pointer is
+ * cleared so the next request can be started.  Returns ide_end_rq()'s
+ * result.
+ */
+int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = hwif->rq;
+       int rc;
 
-       if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
-               ide_task_t *task = (ide_task_t *)rq->special;
-
-               if (task) {
-                       struct ide_taskfile *tf = &task->tf;
-
-                       tf->error = err;
-                       tf->status = stat;
-
-                       drive->hwif->tp_ops->tf_read(drive, task);
-
-                       if (task->tf_flags & IDE_TFLAG_DYN)
-                               kfree(task);
-               }
-       } else if (blk_pm_request(rq)) {
-               struct request_pm_state *pm = rq->data;
-
-               ide_complete_power_step(drive, rq);
-               if (pm->pm_step == IDE_PM_COMPLETED)
-                       ide_complete_pm_request(drive, rq);
-               return;
-       }
-
-       hwif->rq = NULL;
+       /*
+        * if failfast is set on a request, override number of sectors
+        * and complete the whole request right now
+        */
+       if (blk_noretry_request(rq) && error <= 0)
+               nr_bytes = rq->hard_nr_sectors << 9;
 
-       rq->errors = err;
+       rc = ide_end_rq(drive, rq, error, nr_bytes);
+       if (rc == 0)
+               hwif->rq = NULL;
 
-       if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
-                                    blk_rq_bytes(rq))))
-               BUG();
+       return rc;
 }
-EXPORT_SYMBOL(ide_end_drive_cmd);
+EXPORT_SYMBOL(ide_complete_rq);
 
+/*
+ * ide_kill_rq - terminate the given request
+ * @drive: device the request belongs to
+ * @rq: request to finish off
+ *
+ * Driver-private (special) requests on floppy/tape media are completed
+ * without error; everything else is failed with -EIO, after priming
+ * rq->errors appropriately for the media type.
+ */
 void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 {
-       if (rq->rq_disk) {
-               struct ide_driver *drv;
+       u8 drv_req = blk_special_request(rq) && rq->rq_disk;
+       u8 media = drive->media;
 
-               drv = *(struct ide_driver **)rq->rq_disk->private_data;
-               drv->end_request(drive, 0, 0);
-       } else
-               ide_end_request(drive, 0, 0);
+       drive->failed_pc = NULL;
+
+       if ((media == ide_floppy || media == ide_tape) && drv_req) {
+               rq->errors = 0;
+               ide_complete_rq(drive, 0, blk_rq_bytes(rq));
+       } else {
+               if (media == ide_tape)
+                       rq->errors = IDE_DRV_ERROR_GENERAL;
+               else if (blk_fs_request(rq) == 0 && rq->errors == 0)
+                       rq->errors = -EIO;
+               ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
+       }
 }
 
 static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
@@ -232,20 +187,20 @@ static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
 static ide_startstop_t ide_disk_special(ide_drive_t *drive)
 {
        special_t *s = &drive->special;
-       ide_task_t args;
+       struct ide_cmd cmd;
 
-       memset(&args, 0, sizeof(ide_task_t));
-       args.data_phase = TASKFILE_NO_DATA;
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.protocol = ATA_PROT_NODATA;
 
        if (s->b.set_geometry) {
                s->b.set_geometry = 0;
-               ide_tf_set_specify_cmd(drive, &args.tf);
+               ide_tf_set_specify_cmd(drive, &cmd.tf);
        } else if (s->b.recalibrate) {
                s->b.recalibrate = 0;
-               ide_tf_set_restore_cmd(drive, &args.tf);
+               ide_tf_set_restore_cmd(drive, &cmd.tf);
        } else if (s->b.set_multmode) {
                s->b.set_multmode = 0;
-               ide_tf_set_setmult_cmd(drive, &args.tf);
+               ide_tf_set_setmult_cmd(drive, &cmd.tf);
        } else if (s->all) {
                int special = s->all;
                s->all = 0;
@@ -253,10 +208,11 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
                return ide_stopped;
        }
 
-       args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
-                       IDE_TFLAG_CUSTOM_HANDLER;
+       cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
+       cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
+       cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;
 
-       do_rw_taskfile(drive, &args);
+       do_rw_taskfile(drive, &cmd);
 
        return ide_started;
 }
@@ -286,33 +242,22 @@ static ide_startstop_t do_special (ide_drive_t *drive)
        return ide_stopped;
 }
 
-void ide_map_sg(ide_drive_t *drive, struct request *rq)
+/*
+ * ide_map_sg - build the port's scatter/gather table for @cmd
+ * @drive: device the command is for
+ * @cmd: command whose request supplies the data buffers
+ *
+ * Maps the command's request into hwif->sg_table via the block layer
+ * and records the number of mapped entries in cmd->sg_nents.
+ */
+void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
+       struct request *rq = cmd->rq;
 
-       if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
-               sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
-               hwif->sg_nents = 1;
-       } else if (!rq->bio) {
-               sg_init_one(sg, rq->data, rq->data_len);
-               hwif->sg_nents = 1;
-       } else {
-               hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
-       }
+       cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
 }
-
 EXPORT_SYMBOL_GPL(ide_map_sg);
 
-void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
+/*
+ * ide_init_sg_cmd - initialise per-command scatter/gather bookkeeping
+ * @cmd: command to set up
+ * @nr_bytes: total transfer size in bytes
+ *
+ * Resets the command's byte counters and current-sg cursor before a
+ * data transfer starts.
+ */
+void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
 {
-       ide_hwif_t *hwif = drive->hwif;
-
-       hwif->nsect = hwif->nleft = rq->nr_sectors;
-       hwif->cursg_ofs = 0;
-       hwif->cursg = NULL;
+       cmd->nbytes = cmd->nleft = nr_bytes;
+       cmd->cursg_ofs = 0;
+       cmd->cursg = NULL;
 }
-
 EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
 
 /**
@@ -330,24 +275,15 @@ EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
 static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
                struct request *rq)
 {
-       ide_hwif_t *hwif = drive->hwif;
-       ide_task_t *task = rq->special;
-
-       if (task) {
-               hwif->data_phase = task->data_phase;
-
-               switch (hwif->data_phase) {
-               case TASKFILE_MULTI_OUT:
-               case TASKFILE_OUT:
-               case TASKFILE_MULTI_IN:
-               case TASKFILE_IN:
-                       ide_init_sg_cmd(drive, rq);
-                       ide_map_sg(drive, rq);
-               default:
-                       break;
+       struct ide_cmd *cmd = rq->special;
+
+       if (cmd) {
+               if (cmd->protocol == ATA_PROT_PIO) {
+                       ide_init_sg_cmd(cmd, rq->nr_sectors << 9);
+                       ide_map_sg(drive, cmd);
                }
 
-               return do_rw_taskfile(drive, task);
+               return do_rw_taskfile(drive, cmd);
        }
 
        /*
@@ -357,8 +293,8 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 #ifdef DEBUG
        printk("%s: DRIVE_CMD (null)\n", drive->name);
 #endif
-       ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
-                         ide_read_error(drive));
+       rq->errors = 0;
+       ide_complete_rq(drive, 0, blk_rq_bytes(rq));
 
        return ide_stopped;
 }
@@ -376,9 +312,7 @@ static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
        case REQ_DRIVE_RESET:
                return ide_do_reset(drive);
        default:
-               blk_dump_rq_flags(rq, "ide_special_rq - bad request");
-               ide_end_request(drive, 0, 0);
-               return ide_stopped;
+               BUG();
        }
 }
 
@@ -411,7 +345,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
        if (blk_pm_request(rq))
                ide_check_pm_state(drive, rq);
 
-       SELECT_DRIVE(drive);
+       drive->hwif->tp_ops->dev_select(drive);
        if (ide_wait_stat(&startstop, drive, drive->ready_stat,
                          ATA_BUSY | ATA_DRQ, WAIT_READY)) {
                printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
@@ -430,7 +364,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
                if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
                        return execute_drive_cmd(drive, rq);
                else if (blk_pm_request(rq)) {
-                       struct request_pm_state *pm = rq->data;
+                       struct request_pm_state *pm = rq->special;
 #ifdef DEBUG_PM
                        printk("%s: start_power_step(step: %d)\n",
                                drive->name, pm->pm_step);
@@ -438,7 +372,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
                        startstop = ide_start_power_step(drive, rq);
                        if (startstop == ide_stopped &&
                            pm->pm_step == IDE_PM_COMPLETED)
-                               ide_complete_pm_request(drive, rq);
+                               ide_complete_pm_rq(drive, rq);
                        return startstop;
                } else if (!rq->rq_disk && blk_special_request(rq))
                        /*
@@ -543,6 +477,9 @@ void do_ide_request(struct request_queue *q)
 
        spin_unlock_irq(q->queue_lock);
 
+       /* HLD do_request() callback might sleep, make sure it's okay */
+       might_sleep();
+
        if (ide_lock_host(host, hwif))
                goto plug_device_2;
 
@@ -554,11 +491,10 @@ repeat:
                prev_port = hwif->host->cur_port;
                hwif->rq = NULL;
 
-               if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
-                       if (time_before(drive->sleep, jiffies)) {
-                               ide_unlock_port(hwif);
-                               goto plug_device;
-                       }
+               if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
+                   time_after(drive->sleep, jiffies)) {
+                       ide_unlock_port(hwif);
+                       goto plug_device;
                }
 
                if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
@@ -568,7 +504,9 @@ repeat:
                         * quirk_list may not like intr setups/cleanups
                         */
                        if (prev_port && prev_port->cur_dev->quirk_list == 0)
-                               prev_port->tp_ops->set_irq(prev_port, 0);
+                               prev_port->tp_ops->write_devctl(prev_port,
+                                                               ATA_NIEN |
+                                                               ATA_DEVCTL_OBS);
 
                        hwif->host->cur_port = hwif;
                }
@@ -724,6 +662,7 @@ void ide_timer_expiry (unsigned long data)
                        }
                }
                hwif->handler = NULL;
+               hwif->expiry = NULL;
                /*
                 * We need to simulate a real interrupt when invoking
                 * the handler() function, which means we need to
@@ -753,7 +692,7 @@ void ide_timer_expiry (unsigned long data)
                }
                spin_lock_irq(&hwif->lock);
                enable_irq(hwif->irq);
-               if (startstop == ide_stopped) {
+               if (startstop == ide_stopped && hwif->polling == 0) {
                        ide_unlock_port(hwif);
                        plug_device = 1;
                }
@@ -903,6 +842,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
                goto out;
 
        hwif->handler = NULL;
+       hwif->expiry = NULL;
        hwif->req_gen++;
        del_timer(&hwif->timer);
        spin_unlock(&hwif->lock);
@@ -924,7 +864,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
         * same irq as is currently being serviced here, and Linux
         * won't allow another of the same (on any CPU) until we return.
         */
-       if (startstop == ide_stopped) {
+       if (startstop == ide_stopped && hwif->polling == 0) {
                BUG_ON(hwif->handler);
                ide_unlock_port(hwif);
                plug_device = 1;