X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fide%2Fide-io.c;h=99e0bbca3ace197319ed8f29df374932d98e20c7;hb=02d599a365d04658bc9ea71762ed17c895079927;hp=ba6039b55b41ebb348223db10dc4f9a9ea9cfd0e;hpb=8c2c0118b86183bf4826db990cae5c8a8d6c6746;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index ba6039b..99e0bbc 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -47,26 +48,30 @@ #include #include #include +#include #include #include #include #include -#include static int __ide_end_request(ide_drive_t *drive, struct request *rq, - int uptodate, int nr_sectors) + int uptodate, unsigned int nr_bytes, int dequeue) { int ret = 1; + int error = 0; + + if (uptodate <= 0) + error = uptodate ? uptodate : -EIO; /* * if failfast is set on a request, override number of sectors and * complete the whole request right now */ - if (blk_noretry_request(rq) && end_io_error(uptodate)) - nr_sectors = rq->hard_nr_sectors; + if (blk_noretry_request(rq) && error) + nr_bytes = rq->hard_nr_sectors << 9; - if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors) + if (!blk_fs_request(rq) && error && !rq->errors) rq->errors = -EIO; /* @@ -75,15 +80,12 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq, */ if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) { drive->state = 0; - HWGROUP(drive)->hwif->ide_dma_on(drive); + ide_dma_on(drive); } - if (!end_that_request_first(rq, uptodate, nr_sectors)) { - add_disk_randomness(rq->rq_disk); - if (!list_empty(&rq->queuelist)) - blkdev_dequeue_request(rq); - HWGROUP(drive)->rq = NULL; - end_that_request_last(rq, uptodate); + if (!__blk_end_request(rq, error, nr_bytes)) { + if (dequeue) + HWGROUP(drive)->rq = NULL; ret = 0; } @@ -103,6 +105,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq, int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors) { + unsigned int nr_bytes = nr_sectors << 9; struct request *rq; unsigned long flags; int ret = 1; @@ -114,10 +117,14 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors) spin_lock_irqsave(&ide_lock, flags); rq = HWGROUP(drive)->rq; - if (!nr_sectors) - nr_sectors = rq->hard_cur_sectors; + if (!nr_bytes) { + if (blk_pc_request(rq)) + nr_bytes = rq->data_len; + else + nr_bytes = rq->hard_cur_sectors << 9; + } - ret = __ide_end_request(drive, rq, uptodate, nr_sectors); + ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1); spin_unlock_irqrestore(&ide_lock, flags); return ret; @@ -172,15 +179,6 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request * memset(args, 0, sizeof(*args)); - if (drive->media != ide_disk) { - /* - * skip idedisk_pm_restore_pio and idedisk_pm_idle for ATAPI - * devices - */ - if (pm->pm_step == idedisk_pm_restore_pio) - pm->pm_step = ide_pm_restore_dma; - } - switch (pm->pm_step) { case ide_pm_flush_cache: /* Suspend step 1 (flush cache) */ if (drive->media != ide_disk) @@ -191,46 +189,51 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request * return ide_stopped; } if (ide_id_has_flush_cache_ext(drive->id)) - args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT; + args->tf.command = ATA_CMD_FLUSH_EXT; else - args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE; - args->command_type = IDE_DRIVE_TASK_NO_DATA; - args->handler = &task_no_data_intr; - return do_rw_taskfile(drive, args); + args->tf.command = ATA_CMD_FLUSH; 
+ goto out_do_tf; case idedisk_pm_standby: /* Suspend step 2 (standby) */ - args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1; - args->command_type = IDE_DRIVE_TASK_NO_DATA; - args->handler = &task_no_data_intr; - return do_rw_taskfile(drive, args); + args->tf.command = ATA_CMD_STANDBYNOW1; + goto out_do_tf; case idedisk_pm_restore_pio: /* Resume step 1 (restore PIO) */ - if (drive->hwif->tuneproc != NULL) - drive->hwif->tuneproc(drive, 255); - ide_complete_power_step(drive, rq, 0, 0); + ide_set_max_pio(drive); + /* + * skip idedisk_pm_idle for ATAPI devices + */ + if (drive->media != ide_disk) + pm->pm_step = ide_pm_restore_dma; + else + ide_complete_power_step(drive, rq, 0, 0); return ide_stopped; case idedisk_pm_idle: /* Resume step 2 (idle) */ - args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE; - args->command_type = IDE_DRIVE_TASK_NO_DATA; - args->handler = task_no_data_intr; - return do_rw_taskfile(drive, args); + args->tf.command = ATA_CMD_IDLEIMMEDIATE; + goto out_do_tf; case ide_pm_restore_dma: /* Resume step 3 (restore DMA) */ /* - * Right now, all we do is call hwif->ide_dma_check(drive), + * Right now, all we do is call ide_set_dma(drive), * we could be smarter and check for current xfer_speed * in struct drive etc... */ - if ((drive->id->capability & 1) == 0) + if (drive->hwif->dma_ops == NULL) break; - if (drive->hwif->ide_dma_check == NULL) - break; - drive->hwif->ide_dma_check(drive); + /* + * TODO: respect ->using_dma setting + */ + ide_set_dma(drive); break; } pm->pm_step = ide_pm_state_completed; return ide_stopped; + +out_do_tf: + args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + args->data_phase = TASKFILE_NO_DATA; + return do_rw_taskfile(drive, args); } /** @@ -252,39 +255,13 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, int uptodate, int nr_sectors) { unsigned long flags; - int ret = 1; + int ret; spin_lock_irqsave(&ide_lock, flags); - BUG_ON(!blk_rq_started(rq)); - - /* - * if failfast is set on a request, override number of sectors and - * complete the whole request right now - */ - if (blk_noretry_request(rq) && end_io_error(uptodate)) - nr_sectors = rq->hard_nr_sectors; - - if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors) - rq->errors = -EIO; - - /* - * decide whether to reenable DMA -- 3 is a random magic for now, - * if we DMA timeout more than 3 times, just stay in PIO - */ - if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) { - drive->state = 0; - HWGROUP(drive)->hwif->ide_dma_on(drive); - } - - if (!end_that_request_first(rq, uptodate, nr_sectors)) { - add_disk_randomness(rq->rq_disk); - if (blk_rq_tagged(rq)) - blk_queue_end_tag(drive->queue, rq); - end_that_request_last(rq, uptodate); - ret = 0; - } + ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0); spin_unlock_irqrestore(&ide_lock, flags); + return ret; } EXPORT_SYMBOL_GPL(ide_end_dequeued_request); @@ -313,47 +290,12 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq) drive->blocked = 0; blk_start_queue(drive->queue); } - blkdev_dequeue_request(rq); HWGROUP(drive)->rq = NULL; - end_that_request_last(rq, 1); + if (__blk_end_request(rq, 0, 0)) + BUG(); spin_unlock_irqrestore(&ide_lock, flags); } -/* - * FIXME: probably move this somewhere else, name is bad too :) - */ -u64 ide_get_error_location(ide_drive_t *drive, char *args) -{ - u32 high, low; - u8 hcyl, lcyl, sect; - u64 sector; - - high = 0; - hcyl = args[5]; - lcyl = args[4]; - sect = args[3]; - - if (ide_id_has_flush_cache_ext(drive->id)) 
{ - low = (hcyl << 16) | (lcyl << 8) | sect; - HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); - high = ide_read_24(drive); - } else { - u8 cur = HWIF(drive)->INB(IDE_SELECT_REG); - if (cur & 0x40) { - high = cur & 0xf; - low = (hcyl << 16) | (lcyl << 8) | sect; - } else { - low = hcyl * drive->head * drive->sect; - low += lcyl * drive->sect; - low += sect - 1; - } - } - - sector = ((u64) high << 24) | low; - return sector; -} -EXPORT_SYMBOL(ide_get_error_location); - /** * ide_end_drive_cmd - end an explicit drive command * @drive: command @@ -370,7 +312,6 @@ EXPORT_SYMBOL(ide_get_error_location); void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) { - ide_hwif_t *hwif = HWIF(drive); unsigned long flags; struct request *rq; @@ -378,59 +319,22 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) rq = HWGROUP(drive)->rq; spin_unlock_irqrestore(&ide_lock, flags); - if (rq->cmd_type == REQ_TYPE_ATA_CMD) { - u8 *args = (u8 *) rq->buffer; - if (rq->errors == 0) - rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); + if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { + ide_task_t *task = (ide_task_t *)rq->special; - if (args) { - args[0] = stat; - args[1] = err; - args[2] = hwif->INB(IDE_NSECTOR_REG); - } - } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) { - u8 *args = (u8 *) rq->buffer; if (rq->errors == 0) - rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); - - if (args) { - args[0] = stat; - args[1] = err; - args[2] = hwif->INB(IDE_NSECTOR_REG); - args[3] = hwif->INB(IDE_SECTOR_REG); - args[4] = hwif->INB(IDE_LCYL_REG); - args[5] = hwif->INB(IDE_HCYL_REG); - args[6] = hwif->INB(IDE_SELECT_REG); - } - } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { - ide_task_t *args = (ide_task_t *) rq->special; - if (rq->errors == 0) - rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); - - if (args) { - if (args->tf_in_flags.b.data) { - u16 data = hwif->INW(IDE_DATA_REG); - args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF; - args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF; - } - args->tfRegister[IDE_ERROR_OFFSET] = err; - /* be sure we're looking at the low order bits */ - hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG); - args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG); - args->tfRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG); - args->tfRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG); - args->tfRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG); - args->tfRegister[IDE_SELECT_OFFSET] = hwif->INB(IDE_SELECT_REG); - args->tfRegister[IDE_STATUS_OFFSET] = stat; - - if (drive->addressing == 1) { - hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); - args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG); - args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG); - args->hobRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG); - args->hobRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG); - args->hobRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG); - } + rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT); + + if (task) { + struct ide_taskfile *tf = &task->tf; + + tf->error = err; + tf->status = stat; + + drive->hwif->tp_ops->tf_read(drive, task); + + if (task->tf_flags & IDE_TFLAG_DYN) + kfree(task); } } else if (blk_pm_request(rq)) { struct request_pm_state *pm = rq->data; @@ -445,40 +349,16 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) } spin_lock_irqsave(&ide_lock, flags); - blkdev_dequeue_request(rq); HWGROUP(drive)->rq = NULL; rq->errors = err; - end_that_request_last(rq, !rq->errors); + if 
(unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0), + blk_rq_bytes(rq)))) + BUG(); spin_unlock_irqrestore(&ide_lock, flags); } EXPORT_SYMBOL(ide_end_drive_cmd); -/** - * try_to_flush_leftover_data - flush junk - * @drive: drive to flush - * - * try_to_flush_leftover_data() is invoked in response to a drive - * unexpectedly having its DRQ_STAT bit set. As an alternative to - * resetting the drive, this routine tries to clear the condition - * by read a sector's worth of data from the drive. Of course, - * this may not help if the drive is *waiting* for data from *us*. - */ -static void try_to_flush_leftover_data (ide_drive_t *drive) -{ - int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS; - - if (drive->media != ide_disk) - return; - while (i > 0) { - u32 buffer[16]; - u32 wcount = (i > 16) ? 16 : i; - - i -= wcount; - HWIF(drive)->ata_input_data(drive, buffer, wcount); - } -} - static void ide_kill_rq(ide_drive_t *drive, struct request *rq) { if (rq->rq_disk) { @@ -494,46 +374,53 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 { ide_hwif_t *hwif = drive->hwif; - if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) { + if ((stat & ATA_BUSY) || ((stat & ATA_DF) && !drive->nowerr)) { /* other bits are useless when BUSY */ rq->errors |= ERROR_RESET; - } else if (stat & ERR_STAT) { + } else if (stat & ATA_ERR) { /* err has different meaning on cdrom and tape */ - if (err == ABRT_ERR) { + if (err == ATA_ABORTED) { if (drive->select.b.lba && - /* some newer drives don't support WIN_SPECIFY */ - hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY) + /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */ + hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS) return ide_stopped; } else if ((err & BAD_CRC) == BAD_CRC) { /* UDMA crc error, just retry the operation */ drive->crc_count++; - } else if (err & (BBD_ERR | ECC_ERR)) { + } else if (err & (ATA_BBK | ATA_UNC)) { /* retries won't help these */ rq->errors = ERROR_MAX; - } else if (err & TRK0_ERR) { + } else if (err & ATA_TRK0NF) { /* help it find track zero */ rq->errors |= ERROR_RECAL; } } - if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && hwif->err_stops_fifo == 0) - try_to_flush_leftover_data(drive); + if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ && + (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) { + int nsect = drive->mult_count ? 
drive->mult_count : 1; - if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) - /* force an abort */ - hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG); + ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE); + } - if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) + if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) { ide_kill_rq(drive, rq); - else { - if ((rq->errors & ERROR_RESET) == ERROR_RESET) { - ++rq->errors; - return ide_do_reset(drive); - } - if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) - drive->special.b.recalibrate = 1; + return ide_stopped; + } + + if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ)) + rq->errors |= ERROR_RESET; + + if ((rq->errors & ERROR_RESET) == ERROR_RESET) { ++rq->errors; + return ide_do_reset(drive); } + + if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) + drive->special.b.recalibrate = 1; + + ++rq->errors; + return ide_stopped; } @@ -541,16 +428,16 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u { ide_hwif_t *hwif = drive->hwif; - if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) { + if ((stat & ATA_BUSY) || ((stat & ATA_DF) && !drive->nowerr)) { /* other bits are useless when BUSY */ rq->errors |= ERROR_RESET; } else { /* add decoding error stuff */ } - if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) + if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ)) /* force an abort */ - hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG); + hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE); if (rq->errors >= ERROR_MAX) { ide_kill_rq(drive, rq); @@ -616,138 +503,26 @@ ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat) EXPORT_SYMBOL_GPL(ide_error); -ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq) -{ - if (drive->media != ide_disk) - rq->errors |= ERROR_RESET; - - ide_kill_rq(drive, rq); - - return ide_stopped; -} - -EXPORT_SYMBOL_GPL(__ide_abort); - -/** - * ide_abort - abort pending IDE operations - * @drive: drive the error occurred on - * @msg: message to report - * - * ide_abort kills and cleans up when we are about to do a - * host initiated reset on active commands. Longer term we - * want handlers to have sensible abort handling themselves - * - * This differs fundamentally from ide_error because in - * this case the command is doing just fine when we - * blow it away. - */ - -ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg) -{ - struct request *rq; - - if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL) - return ide_stopped; - - /* retry only "normal" I/O: */ - if (!blk_fs_request(rq)) { - rq->errors = 1; - ide_end_drive_cmd(drive, BUSY_STAT, 0); - return ide_stopped; - } - - if (rq->rq_disk) { - ide_driver_t *drv; - - drv = *(ide_driver_t **)rq->rq_disk->private_data; - return drv->abort(drive, rq); - } else - return __ide_abort(drive, rq); -} - -/** - * ide_cmd - issue a simple drive command - * @drive: drive the command is for - * @cmd: command byte - * @nsect: sector byte - * @handler: handler for the command completion - * - * Issue a simple drive command with interrupts. - * The drive must be selected beforehand. 
- */ - -static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, - ide_handler_t *handler) +static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) { - ide_hwif_t *hwif = HWIF(drive); - if (IDE_CONTROL_REG) - hwif->OUTB(drive->ctl,IDE_CONTROL_REG); /* clear nIEN */ - SELECT_MASK(drive,0); - hwif->OUTB(nsect,IDE_NSECTOR_REG); - ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL); + tf->nsect = drive->sect; + tf->lbal = drive->sect; + tf->lbam = drive->cyl; + tf->lbah = drive->cyl >> 8; + tf->device = ((drive->head - 1) | drive->select.all) & ~ATA_LBA; + tf->command = ATA_CMD_INIT_DEV_PARAMS; } -/** - * drive_cmd_intr - drive command completion interrupt - * @drive: drive the completion interrupt occurred on - * - * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD. - * We do any necessary data reading and then wait for the drive to - * go non busy. At that point we may read the error data and complete - * the request - */ - -static ide_startstop_t drive_cmd_intr (ide_drive_t *drive) -{ - struct request *rq = HWGROUP(drive)->rq; - ide_hwif_t *hwif = HWIF(drive); - u8 *args = (u8 *) rq->buffer; - u8 stat = hwif->INB(IDE_STATUS_REG); - int retries = 10; - - local_irq_enable_in_hardirq(); - if ((stat & DRQ_STAT) && args && args[3]) { - u8 io_32bit = drive->io_32bit; - drive->io_32bit = 0; - hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS); - drive->io_32bit = io_32bit; - while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--) - udelay(100); - } - - if (!OK_STAT(stat, READY_STAT, BAD_STAT)) - return ide_error(drive, "drive_cmd", stat); - /* calls ide_end_drive_cmd */ - ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG)); - return ide_stopped; -} - -static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task) +static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf) { - task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect; - task->tfRegister[IDE_SECTOR_OFFSET] = drive->sect; - task->tfRegister[IDE_LCYL_OFFSET] = drive->cyl; - task->tfRegister[IDE_HCYL_OFFSET] = drive->cyl>>8; - task->tfRegister[IDE_SELECT_OFFSET] = ((drive->head-1)|drive->select.all)&0xBF; - task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY; - - task->handler = &set_geometry_intr; + tf->nsect = drive->sect; + tf->command = ATA_CMD_RESTORE; } -static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task) +static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf) { - task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect; - task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE; - - task->handler = &recal_intr; -} - -static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task) -{ - task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req; - task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT; - - task->handler = &set_multmode_intr; + tf->nsect = drive->mult_req; + tf->command = ATA_CMD_SET_MULTI; } static ide_startstop_t ide_disk_special(ide_drive_t *drive) @@ -756,19 +531,17 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive) ide_task_t args; memset(&args, 0, sizeof(ide_task_t)); - args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.data_phase = TASKFILE_NO_DATA; if (s->b.set_geometry) { s->b.set_geometry = 0; - ide_init_specify_cmd(drive, &args); + ide_tf_set_specify_cmd(drive, &args.tf); } else if (s->b.recalibrate) { s->b.recalibrate = 0; - ide_init_restore_cmd(drive, &args); + ide_tf_set_restore_cmd(drive, &args.tf); } else if (s->b.set_multmode) { s->b.set_multmode = 0; - if 
(drive->mult_req > drive->id->max_multsect) - drive->mult_req = drive->id->max_multsect; - ide_init_setmult_cmd(drive, &args); + ide_tf_set_setmult_cmd(drive, &args.tf); } else if (s->all) { int special = s->all; s->all = 0; @@ -776,18 +549,46 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive) return ide_stopped; } + args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE | + IDE_TFLAG_CUSTOM_HANDLER; + do_rw_taskfile(drive, &args); return ide_started; } +/* + * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away + */ +static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio) +{ + switch (req_pio) { + case 202: + case 201: + case 200: + case 102: + case 101: + case 100: + return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0; + case 9: + case 8: + return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0; + case 7: + case 6: + return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0; + default: + return 0; + } +} + /** * do_special - issue some special commands * @drive: drive the command is for * - * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT - * commands to a drive. It used to do much more, but has been scaled - * back. + * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS, + * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive. + * + * It used to do much more, but has been scaled back. */ static ide_startstop_t do_special (ide_drive_t *drive) @@ -798,9 +599,35 @@ static ide_startstop_t do_special (ide_drive_t *drive) printk("%s: do_special: 0x%02x\n", drive->name, s->all); #endif if (s->b.set_tune) { + ide_hwif_t *hwif = drive->hwif; + const struct ide_port_ops *port_ops = hwif->port_ops; + u8 req_pio = drive->tune_req; + s->b.set_tune = 0; - if (HWIF(drive)->tuneproc != NULL) - HWIF(drive)->tuneproc(drive, drive->tune_req); + + if (set_pio_mode_abuse(drive->hwif, req_pio)) { + /* + * take ide_lock for drive->[no_]unmask/[no_]io_32bit + */ + if (req_pio == 8 || req_pio == 9) { + unsigned long flags; + + spin_lock_irqsave(&ide_lock, flags); + port_ops->set_pio_mode(drive, req_pio); + spin_unlock_irqrestore(&ide_lock, flags); + } else + port_ops->set_pio_mode(drive, req_pio); + } else { + int keep_dma = drive->using_dma; + + ide_set_pio(drive, req_pio); + + if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) { + if (keep_dma) + ide_dma_on(drive); + } + } + return ide_stopped; } else { if (drive->media == ide_disk) @@ -835,7 +662,8 @@ void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq) ide_hwif_t *hwif = drive->hwif; hwif->nsect = hwif->nleft = rq->nr_sectors; - hwif->cursg = hwif->cursg_ofs = 0; + hwif->cursg_ofs = 0; + hwif->cursg = NULL; } EXPORT_SYMBOL_GPL(ide_init_sg_cmd); @@ -856,13 +684,10 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, struct request *rq) { ide_hwif_t *hwif = HWIF(drive); - if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { - ide_task_t *args = rq->special; - - if (!args) - goto done; + ide_task_t *task = rq->special; - hwif->data_phase = args->data_phase; + if (task) { + hwif->data_phase = task->data_phase; switch (hwif->data_phase) { case TASKFILE_MULTI_OUT: @@ -875,61 +700,9 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, break; } - if (args->tf_out_flags.all != 0) - return flagged_taskfile(drive, args); - return do_rw_taskfile(drive, args); - } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) { - u8 *args = rq->buffer; - u8 sel; - - if (!args) - goto done; -#ifdef DEBUG - printk("%s: DRIVE_TASK_CMD ", drive->name); - printk("cmd=0x%02x ", 
args[0]); - printk("fr=0x%02x ", args[1]); - printk("ns=0x%02x ", args[2]); - printk("sc=0x%02x ", args[3]); - printk("lcyl=0x%02x ", args[4]); - printk("hcyl=0x%02x ", args[5]); - printk("sel=0x%02x\n", args[6]); -#endif - hwif->OUTB(args[1], IDE_FEATURE_REG); - hwif->OUTB(args[3], IDE_SECTOR_REG); - hwif->OUTB(args[4], IDE_LCYL_REG); - hwif->OUTB(args[5], IDE_HCYL_REG); - sel = (args[6] & ~0x10); - if (drive->select.b.unit) - sel |= 0x10; - hwif->OUTB(sel, IDE_SELECT_REG); - ide_cmd(drive, args[0], args[2], &drive_cmd_intr); - return ide_started; - } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) { - u8 *args = rq->buffer; - - if (!args) - goto done; -#ifdef DEBUG - printk("%s: DRIVE_CMD ", drive->name); - printk("cmd=0x%02x ", args[0]); - printk("sc=0x%02x ", args[1]); - printk("fr=0x%02x ", args[2]); - printk("xx=0x%02x\n", args[3]); -#endif - if (args[0] == WIN_SMART) { - hwif->OUTB(0x4f, IDE_LCYL_REG); - hwif->OUTB(0xc2, IDE_HCYL_REG); - hwif->OUTB(args[2],IDE_FEATURE_REG); - hwif->OUTB(args[1],IDE_SECTOR_REG); - ide_cmd(drive, args[0], args[3], &drive_cmd_intr); - return ide_started; - } - hwif->OUTB(args[2],IDE_FEATURE_REG); - ide_cmd(drive, args[0], args[1], &drive_cmd_intr); - return ide_started; - } - -done: + return do_rw_taskfile(drive, task); + } + /* * NULL is actually a valid way of waiting for * all current requests to be flushed from the queue. @@ -937,12 +710,24 @@ done: #ifdef DEBUG printk("%s: DRIVE_CMD (null)\n", drive->name); #endif - ide_end_drive_cmd(drive, - hwif->INB(IDE_STATUS_REG), - hwif->INB(IDE_ERROR_REG)); + ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif), + ide_read_error(drive)); + return ide_stopped; } +static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq) +{ + switch (rq->cmd[0]) { + case REQ_DRIVE_RESET: + return ide_do_reset(drive); + default: + blk_dump_rq_flags(rq, "ide_special_rq - bad request"); + ide_end_request(drive, 0, 0); + return ide_stopped; + } +} + static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) { struct request_pm_state *pm = rq->data; @@ -961,16 +746,17 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) * the bus may be broken enough to walk on our toes at this * point. */ + ide_hwif_t *hwif = drive->hwif; int rc; #ifdef DEBUG_PM printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); #endif - rc = ide_wait_not_busy(HWIF(drive), 35000); + rc = ide_wait_not_busy(hwif, 35000); if (rc) printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); SELECT_DRIVE(drive); - HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]); - rc = ide_wait_not_busy(HWIF(drive), 100000); + hwif->tp_ops->set_irq(hwif, 1); + rc = ide_wait_not_busy(hwif, 100000); if (rc) printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); } @@ -980,9 +766,7 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) * start_request - start of I/O and command issuing for IDE * * start_request() initiates handling of a new I/O request. It - * accepts commands and I/O (read/write) requests. It also does - * the final remapping for weird stuff like EZDrive. Once - * device mapper can work sector level the EZDrive stuff can go away + * accepts commands and I/O (read/write) requests. 
* * FIXME: this function needs a rename */ @@ -990,7 +774,6 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) { ide_startstop_t startstop; - sector_t block; BUG_ON(!blk_rq_started(rq)); @@ -1001,33 +784,30 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) /* bail early if we've exceeded max_failures */ if (drive->max_failures && (drive->failures > drive->max_failures)) { + rq->cmd_flags |= REQ_FAILED; goto kill_rq; } - block = rq->sector; - if (blk_fs_request(rq) && - (drive->media == ide_disk || drive->media == ide_floppy)) { - block += drive->sect0; - } - /* Yecch - this will shift the entire interval, - possibly killing some innocent following sector */ - if (block == 0 && drive->remap_0_to_1 == 1) - block = 1; /* redirect MBR access to EZ-Drive partn table */ - if (blk_pm_request(rq)) ide_check_pm_state(drive, rq); SELECT_DRIVE(drive); - if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) { + if (ide_wait_stat(&startstop, drive, drive->ready_stat, + ATA_BUSY | ATA_DRQ, WAIT_READY)) { printk(KERN_ERR "%s: drive not ready for command\n", drive->name); return startstop; } if (!drive->special.all) { ide_driver_t *drv; - if (rq->cmd_type == REQ_TYPE_ATA_CMD || - rq->cmd_type == REQ_TYPE_ATA_TASK || - rq->cmd_type == REQ_TYPE_ATA_TASKFILE) + /* + * We reset the drive so we need to issue a SETFEATURES. + * Do it _after_ do_special() restored device parameters. + */ + if (drive->current_speed == 0xff) + ide_config_drive_speed(drive, drive->desired_speed); + + if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) return execute_drive_cmd(drive, rq); else if (blk_pm_request(rq)) { struct request_pm_state *pm = rq->data; @@ -1040,10 +820,20 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) pm->pm_step == ide_pm_state_completed) ide_complete_pm_request(drive, rq); return startstop; - } + } else if (!rq->rq_disk && blk_special_request(rq)) + /* + * TODO: Once all ULDs have been modified to + * check for specific op codes rather than + * blindly accepting any special request, the + * check for ->rq_disk above may be replaced + * by a more suitable mechanism or even + * dropped entirely. 
+ */ + return ide_special_rq(drive, rq); drv = *(ide_driver_t **)rq->rq_disk->private_data; - return drv->do_request(drive, rq, block); + + return drv->do_request(drive, rq, rq->sector); } return do_special(drive); kill_rq: @@ -1216,6 +1006,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) #endif /* so that ide_timer_expiry knows what to do */ hwgroup->sleeping = 1; + hwgroup->req_gen_timer = hwgroup->req_gen; mod_timer(&hwgroup->timer, sleep); /* we purposely leave hwgroup->busy==1 * while sleeping */ @@ -1234,11 +1025,13 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) } again: hwif = HWIF(drive); - if (hwgroup->hwif->sharing_irq && - hwif != hwgroup->hwif && - hwif->io_ports[IDE_CONTROL_OFFSET]) { - /* set nIEN for previous hwif */ - SELECT_INTERRUPT(drive); + if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) { + /* + * set nIEN for previous hwif, drives in the + * quirk_list may not like intr setups/cleanups + */ + if (drive->quirk_list != 1) + hwif->tp_ops->set_irq(hwif, 0); } hwgroup->hwif = hwif; hwgroup->drive = drive; @@ -1312,7 +1105,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) /* * Passes the stuff to ide_do_request */ -void do_ide_request(request_queue_t *q) +void do_ide_request(struct request_queue *q) { ide_drive_t *drive = q->queuedata; @@ -1336,12 +1129,12 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) if (error < 0) { printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); - (void)HWIF(drive)->ide_dma_end(drive); + (void)hwif->dma_ops->dma_end(drive); ret = ide_error(drive, "dma timeout error", - hwif->INB(IDE_STATUS_REG)); + hwif->tp_ops->read_status(hwif)); } else { printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); - (void) hwif->ide_dma_timeout(drive); + hwif->dma_ops->dma_timeout(drive); } /* @@ -1351,7 +1144,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) */ drive->retry_pio++; drive->state = DMA_PIO_RETRY; - (void) hwif->ide_dma_off_quietly(drive); + ide_dma_off_quietly(drive); /* * un-busy drive etc (hwgroup->busy is cleared on return) and @@ -1401,7 +1194,8 @@ void ide_timer_expiry (unsigned long data) spin_lock_irqsave(&ide_lock, flags); - if ((handler = hwgroup->handler) == NULL) { + if (((handler = hwgroup->handler) == NULL) || + (hwgroup->req_gen != hwgroup->req_gen_timer)) { /* * Either a marginal timeout occurred * (got the interrupt just as timer expired), @@ -1429,6 +1223,7 @@ void ide_timer_expiry (unsigned long data) if ((wait = expiry(drive)) > 0) { /* reset timer */ hwgroup->timer.expires = jiffies + wait; + hwgroup->req_gen_timer = hwgroup->req_gen; add_timer(&hwgroup->timer); spin_unlock_irqrestore(&ide_lock, flags); return; @@ -1442,12 +1237,8 @@ void ide_timer_expiry (unsigned long data) */ spin_unlock(&ide_lock); hwif = HWIF(drive); -#if DISABLE_IRQ_NOSYNC - disable_irq_nosync(hwif->irq); -#else /* disable_irq_nosync ?? 
*/ disable_irq(hwif->irq); -#endif /* DISABLE_IRQ_NOSYNC */ /* local CPU only, * as if we were handling an interrupt */ local_irq_disable(); @@ -1455,7 +1246,7 @@ void ide_timer_expiry (unsigned long data) startstop = handler(drive); } else if (drive_is_ready(drive)) { if (drive->waiting_for_dma) - (void) hwgroup->hwif->ide_dma_lostirq(drive); + hwif->dma_ops->dma_lost_irq(drive); (void)ide_ack_intr(hwif); printk(KERN_WARNING "%s: lost interrupt\n", drive->name); startstop = handler(drive); @@ -1464,7 +1255,8 @@ void ide_timer_expiry (unsigned long data) startstop = ide_dma_timeout_retry(drive, wait); } else startstop = - ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG)); + ide_error(drive, "irq timeout", + hwif->tp_ops->read_status(hwif)); } drive->service_time = jiffies - drive->service_start; spin_lock_irq(&ide_lock); @@ -1520,8 +1312,9 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup) */ do { if (hwif->irq == irq) { - stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); - if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { + stat = hwif->tp_ops->read_status(hwif); + + if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) { /* Try to not flood the console with msgs */ static unsigned long last_msgtime, count; ++count; @@ -1562,7 +1355,7 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup) * on the hwgroup and the process begins again. */ -irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs) +irqreturn_t ide_intr (int irq, void *dev_id) { unsigned long flags; ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id; @@ -1596,7 +1389,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs) * remove all the ifdef PCI crap */ #ifdef CONFIG_BLK_DEV_IDEPCI - if (hwif->pci_dev && !hwif->pci_dev->vendor) + if (hwif->chipset != ide_pci) #endif /* CONFIG_BLK_DEV_IDEPCI */ { /* @@ -1610,7 +1403,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs) * Whack the status register, just in case * we have a leftover pending IRQ. */ - (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); + (void)hwif->tp_ops->read_status(hwif); #endif /* CONFIG_BLK_DEV_IDEPCI */ } spin_unlock_irqrestore(&ide_lock, flags); @@ -1643,9 +1436,21 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs) printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name); } hwgroup->handler = NULL; + hwgroup->req_gen++; del_timer(&hwgroup->timer); spin_unlock(&ide_lock); + /* Some controllers might set DMA INTR no matter DMA or PIO; + * bmdma status might need to be cleared even for + * PIO interrupts to prevent spurious/lost irq. + */ + if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma)) + /* ide_dma_end() needs bmdma status for error checking. + * So, skip clearing bmdma status here and leave it + * to ide_dma_end() if this is dma interrupt. + */ + hwif->ide_dma_clear_irq(drive); + if (drive->unmask) local_irq_enable_in_hardirq(); /* service this interrupt, may set handler for next interrupt */ @@ -1674,90 +1479,65 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs) } /** - * ide_init_drive_cmd - initialize a drive command request - * @rq: request object - * - * Initialize a request before we fill it in and send it down to - * ide_do_drive_cmd. Commands must be set up by this function. Right - * now it doesn't do a lot, but if that changes abusers will have a - * nasty surprise. 
- */ - -void ide_init_drive_cmd (struct request *rq) -{ - memset(rq, 0, sizeof(*rq)); - rq->cmd_type = REQ_TYPE_ATA_CMD; - rq->ref_count = 1; -} - -EXPORT_SYMBOL(ide_init_drive_cmd); - -/** * ide_do_drive_cmd - issue IDE special command * @drive: device to issue command * @rq: request to issue - * @action: action for processing * * This function issues a special IDE device request * onto the request queue. * - * If action is ide_wait, then the rq is queued at the end of the - * request queue, and the function sleeps until it has been processed. - * This is for use when invoked from an ioctl handler. - * - * If action is ide_preempt, then the rq is queued at the head of - * the request queue, displacing the currently-being-processed - * request and this function returns immediately without waiting - * for the new rq to be completed. This is VERY DANGEROUS, and is - * intended for careful use by the ATAPI tape/cdrom driver code. - * - * If action is ide_end, then the rq is queued at the end of the - * request queue, and the function returns immediately without waiting - * for the new rq to be completed. This is again intended for careful - * use by the ATAPI tape/cdrom driver code. + * the rq is queued at the head of the request queue, displacing + * the currently-being-processed request and this function + * returns immediately without waiting for the new rq to be + * completed. This is VERY DANGEROUS, and is intended for + * careful use by the ATAPI tape/cdrom driver code. */ - -int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action) + +void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq) { unsigned long flags; ide_hwgroup_t *hwgroup = HWGROUP(drive); - DECLARE_COMPLETION_ONSTACK(wait); - int where = ELEVATOR_INSERT_BACK, err; - int must_wait = (action == ide_wait || action == ide_head_wait); - - rq->errors = 0; - - /* - * we need to hold an extra reference to request for safe inspection - * after completion - */ - if (must_wait) { - rq->ref_count++; - rq->end_io_data = &wait; - rq->end_io = blk_end_sync_rq; - } spin_lock_irqsave(&ide_lock, flags); - if (action == ide_preempt) - hwgroup->rq = NULL; - if (action == ide_preempt || action == ide_head_wait) { - where = ELEVATOR_INSERT_FRONT; - rq->cmd_flags |= REQ_PREEMPT; - } - __elv_add_request(drive->queue, rq, where, 0); - ide_do_request(hwgroup, IDE_NO_IRQ); + hwgroup->rq = NULL; + __elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 1); + __generic_unplug_device(drive->queue); spin_unlock_irqrestore(&ide_lock, flags); +} - err = 0; - if (must_wait) { - wait_for_completion(&wait); - if (rq->errors) - err = -EIO; - - blk_put_request(rq); - } +EXPORT_SYMBOL(ide_do_drive_cmd); - return err; +void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) +{ + ide_hwif_t *hwif = drive->hwif; + ide_task_t task; + + memset(&task, 0, sizeof(task)); + task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM | + IDE_TFLAG_OUT_FEATURE | tf_flags; + task.tf.feature = dma; /* Use PIO/DMA */ + task.tf.lbam = bcount & 0xff; + task.tf.lbah = (bcount >> 8) & 0xff; + + ide_tf_dump(drive->name, &task.tf); + hwif->tp_ops->set_irq(hwif, 1); + SELECT_MASK(drive, 0); + hwif->tp_ops->tf_load(drive, &task); } -EXPORT_SYMBOL(ide_do_drive_cmd); +EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load); + +void ide_pad_transfer(ide_drive_t *drive, int write, int len) +{ + ide_hwif_t *hwif = drive->hwif; + u8 buf[4] = { 0 }; + + while (len > 0) { + if (write) + hwif->tp_ops->output_data(drive, NULL, buf, min(4, len)); + else + 
hwif->tp_ops->input_data(drive, NULL, buf, min(4, len)); + len -= 4; + } +} +EXPORT_SYMBOL_GPL(ide_pad_transfer);
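
The hunks above replace the old end_that_request_first()/end_that_request_last() pair with a single __blk_end_request() call, after first folding the legacy positive/zero/negative "uptodate" convention into an errno plus a byte count (and honouring failfast requests by completing them in full). The fragment below is a minimal user-space sketch of that conversion step only, with a hypothetical fake_rq standing in for the few struct request fields involved; it is illustrative, not kernel code, and not part of the patch.

#include <stdio.h>
#include <errno.h>

struct fake_rq {                        /* hypothetical stand-in for struct request */
	int noretry;                    /* models blk_noretry_request(rq)            */
	unsigned int hard_nr_sectors;   /* total sectors in the request              */
};

/*
 * Mirror of the conversion done at the top of the reworked __ide_end_request():
 * uptodate > 0 means success, 0 means generic failure (-EIO), < 0 is a specific
 * -errno.  On any error, a failfast request is completed for its full length.
 */
static unsigned int completion_args(const struct fake_rq *rq, int uptodate,
				    unsigned int nr_bytes, int *error)
{
	*error = (uptodate <= 0) ? (uptodate ? uptodate : -EIO) : 0;

	if (rq->noretry && *error)
		nr_bytes = rq->hard_nr_sectors << 9;    /* sectors -> bytes */

	return nr_bytes;
}

int main(void)
{
	struct fake_rq rq = { .noretry = 1, .hard_nr_sectors = 8 };
	int error;
	unsigned int nr_bytes = completion_args(&rq, 0, 512, &error);

	/* prints: error=-5 nr_bytes=4096 */
	printf("error=%d nr_bytes=%u\n", error, nr_bytes);
	return 0;
}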