int uptodate, unsigned int nr_bytes, int dequeue)
{
int ret = 1;
+ int error = 0;
+
+ if (uptodate <= 0)
+ error = uptodate ? uptodate : -EIO;
/*
* if failfast is set on a request, override number of sectors and
* complete the whole request right now
*/
- if (blk_noretry_request(rq) && end_io_error(uptodate))
+ if (blk_noretry_request(rq) && error)
nr_bytes = rq->hard_nr_sectors << 9;
- if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
+ if (!blk_fs_request(rq) && error && !rq->errors)
rq->errors = -EIO;
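For reference, a sketch of the new error convention (editor's note, not part of the patch): the legacy uptodate argument folds three cases into one int, and the error variable added above unpacks them into a 0-or-negative-errno value for the block layer. A standalone model:

    #include <errno.h>
    #include <stdio.h>

    /* uptodate > 0: success; uptodate == 0: generic failure (-EIO);
     * uptodate < 0: already a negative errno, passed through as-is */
    static int uptodate_to_error(int uptodate)
    {
            if (uptodate <= 0)
                    return uptodate ? uptodate : -EIO;
            return 0;
    }

    int main(void)
    {
            /* prints "0 -5 -28" on Linux */
            printf("%d %d %d\n", uptodate_to_error(1),
                   uptodate_to_error(0), uptodate_to_error(-ENOSPC));
            return 0;
    }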
/*
ide_dma_on(drive);
}
- if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
- add_disk_randomness(rq->rq_disk);
- if (dequeue) {
- if (!list_empty(&rq->queuelist))
- blkdev_dequeue_request(rq);
+ if (!__blk_end_request(rq, error, nr_bytes)) {
+ if (dequeue)
HWGROUP(drive)->rq = NULL;
- }
- end_that_request_last(rq, uptodate);
ret = 0;
}
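Unlike end_that_request_chunk(), which took the uptodate flag, __blk_end_request() takes a negative errno plus a byte count and returns nonzero while the request still has bytes pending, which is why ret = 0 is keyed off a zero return above. A toy userspace model of that return convention (not the real block layer):

    #include <stdio.h>

    struct toy_request { unsigned int bytes_left; };

    /* complete nr_bytes; a nonzero return means the request is not done */
    static int toy_end_request(struct toy_request *rq, unsigned int nr_bytes)
    {
            if (nr_bytes > rq->bytes_left)
                    nr_bytes = rq->bytes_left;
            rq->bytes_left -= nr_bytes;
            return rq->bytes_left != 0;
    }

    int main(void)
    {
            struct toy_request rq = { .bytes_left = 4096 };

            printf("%d\n", toy_end_request(&rq, 1024)); /* 1: partial */
            printf("%d\n", toy_end_request(&rq, 3072)); /* 0: complete */
            return 0;
    }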
* we could be smarter and check for current xfer_speed
* in struct drive etc...
*/
- if (drive->hwif->dma_host_set == NULL)
+ if (drive->hwif->dma_ops == NULL)
break;
/*
* TODO: respect ->using_dma setting
return ide_stopped;
out_do_tf:
- args->tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
+ args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
args->data_phase = TASKFILE_NO_DATA;
return do_rw_taskfile(drive, args);
}
drive->blocked = 0;
blk_start_queue(drive->queue);
}
- blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
- end_that_request_last(rq, 1);
+ if (__blk_end_request(rq, 0, 0))
+ BUG();
spin_unlock_irqrestore(&ide_lock, flags);
}
-void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_taskfile *tf = &task->tf;
-
- if (task->tf_flags & IDE_TFLAG_IN_DATA) {
- u16 data = hwif->INW(IDE_DATA_REG);
-
- tf->data = data & 0xff;
- tf->hob_data = (data >> 8) & 0xff;
- }
-
- /* be sure we're looking at the low order bits */
- hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
-
- if (task->tf_flags & IDE_TFLAG_IN_NSECT)
- tf->nsect = hwif->INB(IDE_NSECTOR_REG);
- if (task->tf_flags & IDE_TFLAG_IN_LBAL)
- tf->lbal = hwif->INB(IDE_SECTOR_REG);
- if (task->tf_flags & IDE_TFLAG_IN_LBAM)
- tf->lbam = hwif->INB(IDE_LCYL_REG);
- if (task->tf_flags & IDE_TFLAG_IN_LBAH)
- tf->lbah = hwif->INB(IDE_HCYL_REG);
- if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
- tf->device = hwif->INB(IDE_SELECT_REG);
-
- if (task->tf_flags & IDE_TFLAG_LBA48) {
- hwif->OUTB(drive->ctl | 0x80, IDE_CONTROL_REG);
-
- if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
- tf->hob_feature = hwif->INB(IDE_FEATURE_REG);
- if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
- tf->hob_nsect = hwif->INB(IDE_NSECTOR_REG);
- if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
- tf->hob_lbal = hwif->INB(IDE_SECTOR_REG);
- if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
- tf->hob_lbam = hwif->INB(IDE_LCYL_REG);
- if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
- tf->hob_lbah = hwif->INB(IDE_HCYL_REG);
- }
-}
-
/**
* ide_end_drive_cmd - end an explicit drive command
* @drive: command
void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
- ide_hwif_t *hwif = HWIF(drive);
unsigned long flags;
struct request *rq;
rq = HWGROUP(drive)->rq;
spin_unlock_irqrestore(&ide_lock, flags);
- if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
- u8 *args = (u8 *) rq->buffer;
- if (rq->errors == 0)
- rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
- if (args) {
- args[0] = stat;
- args[1] = err;
- args[2] = hwif->INB(IDE_NSECTOR_REG);
- }
- } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
- ide_task_t *args = (ide_task_t *) rq->special;
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ ide_task_t *task = (ide_task_t *)rq->special;
if (rq->errors == 0)
- rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
-
- if (args) {
- struct ide_taskfile *tf = &args->tf;
+ rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);
+
+ if (task) {
+ struct ide_taskfile *tf = &task->tf;
tf->error = err;
tf->status = stat;
- args->tf_flags |= (IDE_TFLAG_IN_TF|IDE_TFLAG_IN_DEVICE);
- if (args->tf_flags & IDE_TFLAG_LBA48)
- args->tf_flags |= IDE_TFLAG_IN_HOB;
- ide_tf_read(drive, args);
+ drive->hwif->tp_ops->tf_read(drive, task);
+ if (task->tf_flags & IDE_TFLAG_DYN)
+ kfree(task);
}
} else if (blk_pm_request(rq)) {
struct request_pm_state *pm = rq->data;
}
spin_lock_irqsave(&ide_lock, flags);
- blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
rq->errors = err;
- end_that_request_last(rq, !rq->errors);
+ if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
+ blk_rq_bytes(rq))))
+ BUG();
spin_unlock_irqrestore(&ide_lock, flags);
}
EXPORT_SYMBOL(ide_end_drive_cmd);
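One detail of the taskfile hunk above worth spelling out: IDE_TFLAG_DYN makes ownership explicit, so the completion path frees only tasks that were heap-allocated and tagged by the submitter, never stack-allocated ones. A hedged userspace analogue (TOY_TFLAG_DYN is a stand-in, not the real flag value):

    #include <stdlib.h>

    #define TOY_TFLAG_DYN (1u << 0)          /* stand-in for IDE_TFLAG_DYN */

    struct toy_task { unsigned int tf_flags; };

    static void toy_complete_task(struct toy_task *task)
    {
            /* free only tasks the submitter marked as dynamically allocated */
            if (task->tf_flags & TOY_TFLAG_DYN)
                    free(task);
    }

    int main(void)
    {
            struct toy_task on_stack = { 0 };
            struct toy_task *on_heap = calloc(1, sizeof(*on_heap));

            on_heap->tf_flags |= TOY_TFLAG_DYN;
            toy_complete_task(&on_stack);    /* no-op */
            toy_complete_task(on_heap);      /* freed here */
            return 0;
    }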
-/**
- * try_to_flush_leftover_data - flush junk
- * @drive: drive to flush
- *
- * try_to_flush_leftover_data() is invoked in response to a drive
- * unexpectedly having its DRQ_STAT bit set. As an alternative to
- * resetting the drive, this routine tries to clear the condition
- * by read a sector's worth of data from the drive. Of course,
- * this may not help if the drive is *waiting* for data from *us*.
- */
-static void try_to_flush_leftover_data (ide_drive_t *drive)
-{
- int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;
-
- if (drive->media != ide_disk)
- return;
- while (i > 0) {
- u32 buffer[16];
- u32 wcount = (i > 16) ? 16 : i;
-
- i -= wcount;
- HWIF(drive)->ata_input_data(drive, buffer, wcount);
- }
-}
-
static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
if (rq->rq_disk) {
if (err == ABRT_ERR) {
if (drive->select.b.lba &&
/* some newer drives don't support WIN_SPECIFY */
- hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY)
+ hwif->tp_ops->read_status(hwif) == WIN_SPECIFY)
return ide_stopped;
} else if ((err & BAD_CRC) == BAD_CRC) {
/* UDMA crc error, just retry the operation */
}
if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ &&
- (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0)
- try_to_flush_leftover_data(drive);
+ (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
+ int nsect = drive->mult_count ? drive->mult_count : 1;
+
+ ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
+ }
if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
ide_kill_rq(drive, rq);
return ide_stopped;
}
- if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
+ if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
rq->errors |= ERROR_RESET;
if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
/* add decoding error stuff */
}
- if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
+ if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
- hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);
+ hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
if (rq->errors >= ERROR_MAX) {
ide_kill_rq(drive, rq);
EXPORT_SYMBOL_GPL(ide_error);
-ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
-{
- if (drive->media != ide_disk)
- rq->errors |= ERROR_RESET;
-
- ide_kill_rq(drive, rq);
-
- return ide_stopped;
-}
-
-EXPORT_SYMBOL_GPL(__ide_abort);
-
-/**
- * ide_abort - abort pending IDE operations
- * @drive: drive the error occurred on
- * @msg: message to report
- *
- * ide_abort kills and cleans up when we are about to do a
- * host initiated reset on active commands. Longer term we
- * want handlers to have sensible abort handling themselves
- *
- * This differs fundamentally from ide_error because in
- * this case the command is doing just fine when we
- * blow it away.
- */
-
-ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
-{
- struct request *rq;
-
- if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
- return ide_stopped;
-
- /* retry only "normal" I/O: */
- if (!blk_fs_request(rq)) {
- rq->errors = 1;
- ide_end_drive_cmd(drive, BUSY_STAT, 0);
- return ide_stopped;
- }
-
- if (rq->rq_disk) {
- ide_driver_t *drv;
-
- drv = *(ide_driver_t **)rq->rq_disk->private_data;
- return drv->abort(drive, rq);
- } else
- return __ide_abort(drive, rq);
-}
-
-/**
- * drive_cmd_intr - drive command completion interrupt
- * @drive: drive the completion interrupt occurred on
- *
- * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
- * We do any necessary data reading and then wait for the drive to
- * go non busy. At that point we may read the error data and complete
- * the request
- */
-
-static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
-{
- struct request *rq = HWGROUP(drive)->rq;
- ide_hwif_t *hwif = HWIF(drive);
- u8 *args = (u8 *) rq->buffer;
- u8 stat = hwif->INB(IDE_STATUS_REG);
- int retries = 10;
-
- local_irq_enable_in_hardirq();
- if (rq->cmd_type == REQ_TYPE_ATA_CMD &&
- (stat & DRQ_STAT) && args && args[3]) {
- u8 io_32bit = drive->io_32bit;
- drive->io_32bit = 0;
- hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
- drive->io_32bit = io_32bit;
- while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
- udelay(100);
- }
-
- if (!OK_STAT(stat, READY_STAT, BAD_STAT))
- return ide_error(drive, "drive_cmd", stat);
- /* calls ide_end_drive_cmd */
- ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
- return ide_stopped;
-}
-
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
tf->nsect = drive->sect;
return ide_stopped;
}
- args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE |
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
IDE_TFLAG_CUSTOM_HANDLER;
do_rw_taskfile(drive, &args);
#endif
if (s->b.set_tune) {
ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
u8 req_pio = drive->tune_req;
s->b.set_tune = 0;
if (set_pio_mode_abuse(drive->hwif, req_pio)) {
-
- if (hwif->set_pio_mode == NULL)
- return ide_stopped;
-
/*
* take ide_lock for drive->[no_]unmask/[no_]io_32bit
*/
unsigned long flags;
spin_lock_irqsave(&ide_lock, flags);
- hwif->set_pio_mode(drive, req_pio);
+ port_ops->set_pio_mode(drive, req_pio);
spin_unlock_irqrestore(&ide_lock, flags);
} else
- hwif->set_pio_mode(drive, req_pio);
+ port_ops->set_pio_mode(drive, req_pio);
} else {
int keep_dma = drive->using_dma;
struct request *rq)
{
ide_hwif_t *hwif = HWIF(drive);
- u8 *args = rq->buffer;
- ide_task_t ltask;
- struct ide_taskfile *tf = &ltask.tf;
-
- if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
- ide_task_t *task = rq->special;
-
- if (task == NULL)
- goto done;
+ ide_task_t *task = rq->special;
+
+ if (task) {
hwif->data_phase = task->data_phase;
switch (hwif->data_phase) {
return do_rw_taskfile(drive, task);
}
- if (args == NULL)
- goto done;
-
- memset(&ltask, 0, sizeof(ltask));
- if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
-#ifdef DEBUG
- printk("%s: DRIVE_CMD\n", drive->name);
-#endif
- tf->feature = args[2];
- if (args[0] == WIN_SMART) {
- tf->nsect = args[3];
- tf->lbal = args[1];
- tf->lbam = 0x4f;
- tf->lbah = 0xc2;
- ltask.tf_flags = IDE_TFLAG_OUT_TF;
- } else {
- tf->nsect = args[1];
- ltask.tf_flags = IDE_TFLAG_OUT_FEATURE |
- IDE_TFLAG_OUT_NSECT;
- }
- }
- tf->command = args[0];
- ide_tf_load(drive, &ltask);
- ide_execute_command(drive, args[0], &drive_cmd_intr, WAIT_WORSTCASE, NULL);
- return ide_started;
-
-done:
/*
* NULL is actually a valid way of waiting for
* all current requests to be flushed from the queue.
#ifdef DEBUG
printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
- ide_end_drive_cmd(drive,
- hwif->INB(IDE_STATUS_REG),
- hwif->INB(IDE_ERROR_REG));
+ ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
+ ide_read_error(drive));
+
return ide_stopped;
}
+static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
+{
+ switch (rq->cmd[0]) {
+ case REQ_DRIVE_RESET:
+ return ide_do_reset(drive);
+ default:
+ blk_dump_rq_flags(rq, "ide_special_rq - bad request");
+ ide_end_request(drive, 0, 0);
+ return ide_stopped;
+ }
+}
+
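ide_special_rq() above dispatches on the first byte of rq->cmd and fails anything it does not recognize instead of silently completing it. A minimal standalone model of that dispatch (the opcode value is made up for illustration):

    #include <stdio.h>

    #define TOY_REQ_DRIVE_RESET 0xfd         /* hypothetical opcode */

    static int toy_special_rq(const unsigned char *cmd)
    {
            switch (cmd[0]) {
            case TOY_REQ_DRIVE_RESET:
                    return 0;                /* would kick off a reset */
            default:
                    fprintf(stderr, "bad special request: %02x\n", cmd[0]);
                    return -1;               /* reject the request */
            }
    }

    int main(void)
    {
            unsigned char reset = TOY_REQ_DRIVE_RESET, junk = 0x42;

            toy_special_rq(&reset);          /* accepted */
            toy_special_rq(&junk);           /* logged and rejected */
            return 0;
    }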
static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
struct request_pm_state *pm = rq->data;
* the bus may be broken enough to walk on our toes at this
* point.
*/
+ ide_hwif_t *hwif = drive->hwif;
int rc;
#ifdef DEBUG_PM
printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
- rc = ide_wait_not_busy(HWIF(drive), 35000);
+ rc = ide_wait_not_busy(hwif, 35000);
if (rc)
printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
SELECT_DRIVE(drive);
- ide_set_irq(drive, 1);
- rc = ide_wait_not_busy(HWIF(drive), 100000);
+ hwif->tp_ops->set_irq(hwif, 1);
+ rc = ide_wait_not_busy(hwif, 100000);
if (rc)
printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
}
if (drive->current_speed == 0xff)
ide_config_drive_speed(drive, drive->desired_speed);
- if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
- rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
return execute_drive_cmd(drive, rq);
else if (blk_pm_request(rq)) {
struct request_pm_state *pm = rq->data;
pm->pm_step == ide_pm_state_completed)
ide_complete_pm_request(drive, rq);
return startstop;
- }
+ } else if (!rq->rq_disk && blk_special_request(rq))
+ /*
+ * TODO: Once all ULDs have been modified to
+ * check for specific op codes rather than
+ * blindly accepting any special request, the
+ * check for ->rq_disk above may be replaced
+ * by a more suitable mechanism or even
+ * dropped entirely.
+ */
+ return ide_special_rq(drive, rq);
drv = *(ide_driver_t **)rq->rq_disk->private_data;
return drv->do_request(drive, rq, block);
* quirk_list may not like intr setups/cleanups
*/
if (drive->quirk_list != 1)
- ide_set_irq(drive, 0);
+ hwif->tp_ops->set_irq(hwif, 0);
}
hwgroup->hwif = hwif;
hwgroup->drive = drive;
if (error < 0) {
printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
- (void)HWIF(drive)->ide_dma_end(drive);
+ (void)hwif->dma_ops->dma_end(drive);
ret = ide_error(drive, "dma timeout error",
- hwif->INB(IDE_STATUS_REG));
+ hwif->tp_ops->read_status(hwif));
} else {
printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
- hwif->dma_timeout(drive);
+ hwif->dma_ops->dma_timeout(drive);
}
/*
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
if (drive->waiting_for_dma)
- hwgroup->hwif->dma_lost_irq(drive);
+ hwif->dma_ops->dma_lost_irq(drive);
(void)ide_ack_intr(hwif);
printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
startstop = handler(drive);
startstop = ide_dma_timeout_retry(drive, wait);
} else
startstop =
- ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
+ ide_error(drive, "irq timeout",
+ hwif->tp_ops->read_status(hwif));
}
drive->service_time = jiffies - drive->service_start;
spin_lock_irq(&ide_lock);
*/
do {
if (hwif->irq == irq) {
- stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = hwif->tp_ops->read_status(hwif);
+
if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
/* Try to not flood the console with msgs */
static unsigned long last_msgtime, count;
* remove all the ifdef PCI crap
*/
#ifdef CONFIG_BLK_DEV_IDEPCI
- if (hwif->pci_dev && !hwif->pci_dev->vendor)
+ if (hwif->chipset != ide_pci)
#endif /* CONFIG_BLK_DEV_IDEPCI */
{
/*
* Whack the status register, just in case
* we have a leftover pending IRQ.
*/
- (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ (void)hwif->tp_ops->read_status(hwif);
#endif /* CONFIG_BLK_DEV_IDEPCI */
}
spin_unlock_irqrestore(&ide_lock, flags);
}
/**
- * ide_init_drive_cmd - initialize a drive command request
- * @rq: request object
- *
- * Initialize a request before we fill it in and send it down to
- * ide_do_drive_cmd. Commands must be set up by this function. Right
- * now it doesn't do a lot, but if that changes abusers will have a
- * nasty surprise.
- */
-
-void ide_init_drive_cmd (struct request *rq)
-{
- memset(rq, 0, sizeof(*rq));
- rq->cmd_type = REQ_TYPE_ATA_CMD;
- rq->ref_count = 1;
-}
-
-EXPORT_SYMBOL(ide_init_drive_cmd);
-
-/**
* ide_do_drive_cmd - issue IDE special command
* @drive: device to issue command
* @rq: request to issue
- * @action: action for processing
*
* This function issues a special IDE device request
* onto the request queue.
*
- * If action is ide_wait, then the rq is queued at the end of the
- * request queue, and the function sleeps until it has been processed.
- * This is for use when invoked from an ioctl handler.
- *
- * If action is ide_preempt, then the rq is queued at the head of
- * the request queue, displacing the currently-being-processed
- * request and this function returns immediately without waiting
- * for the new rq to be completed. This is VERY DANGEROUS, and is
- * intended for careful use by the ATAPI tape/cdrom driver code.
- *
- * If action is ide_end, then the rq is queued at the end of the
- * request queue, and the function returns immediately without waiting
- * for the new rq to be completed. This is again intended for careful
- * use by the ATAPI tape/cdrom driver code.
+ * The rq is queued at the head of the request queue, displacing
+ * the currently-being-processed request; this function returns
+ * immediately without waiting for the new rq to be completed.
+ * This is VERY DANGEROUS, and is intended for careful use by
+ * the ATAPI tape/cdrom driver code.
*/
-
-int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
+
+void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
{
unsigned long flags;
ide_hwgroup_t *hwgroup = HWGROUP(drive);
- DECLARE_COMPLETION_ONSTACK(wait);
- int where = ELEVATOR_INSERT_BACK, err;
- int must_wait = (action == ide_wait || action == ide_head_wait);
-
- rq->errors = 0;
-
- /*
- * we need to hold an extra reference to request for safe inspection
- * after completion
- */
- if (must_wait) {
- rq->ref_count++;
- rq->end_io_data = &wait;
- rq->end_io = blk_end_sync_rq;
- }
spin_lock_irqsave(&ide_lock, flags);
- if (action == ide_preempt)
- hwgroup->rq = NULL;
- if (action == ide_preempt || action == ide_head_wait) {
- where = ELEVATOR_INSERT_FRONT;
- rq->cmd_flags |= REQ_PREEMPT;
- }
- __elv_add_request(drive->queue, rq, where, 0);
- ide_do_request(hwgroup, IDE_NO_IRQ);
+ hwgroup->rq = NULL;
+ __elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 1);
+ __generic_unplug_device(drive->queue);
spin_unlock_irqrestore(&ide_lock, flags);
-
- err = 0;
- if (must_wait) {
- wait_for_completion(&wait);
- if (rq->errors)
- err = -EIO;
-
- blk_put_request(rq);
- }
-
- return err;
}
EXPORT_SYMBOL(ide_do_drive_cmd);
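With the ide_action_t argument and the built-in wait gone, a caller that needs synchronous behaviour is expected to go through the block layer itself rather than ide_do_drive_cmd(). A sketch of such a caller, modeled on what ide_raw_taskfile() does in this tree (error handling trimmed, so treat it as illustrative rather than verbatim):

    static int toy_issue_and_wait(ide_drive_t *drive, ide_task_t *task)
    {
            struct request *rq;
            int error;

            rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
            rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
            rq->special = task;

            /* queued at the tail; sleeps until the request completes */
            error = blk_execute_rq(drive->queue, NULL, rq, 0);
            blk_put_request(rq);

            return error;
    }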
void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
+ ide_hwif_t *hwif = drive->hwif;
ide_task_t task;
memset(&task, 0, sizeof(task));
task.tf.lbam = bcount & 0xff;
task.tf.lbah = (bcount >> 8) & 0xff;
- ide_tf_load(drive, &task);
+ ide_tf_dump(drive->name, &task.tf);
+ hwif->tp_ops->set_irq(hwif, 1);
+ SELECT_MASK(drive, 0);
+ hwif->tp_ops->tf_load(drive, &task);
}
EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
+
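ide_pktcmd_tf_load() programs the ATAPI byte count by splitting it across the LBA-mid/high (classic cylinder) taskfile registers: the low byte goes in lbam and the high byte in lbah. A quick standalone check of the split/reassemble round trip:

    #include <assert.h>

    int main(void)
    {
            unsigned short bcount = 0xfffe;          /* a typical ATAPI max */
            unsigned char lbam = bcount & 0xff;
            unsigned char lbah = (bcount >> 8) & 0xff;

            assert(((lbah << 8) | lbam) == bcount);  /* round-trips */
            return 0;
    }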
+void ide_pad_transfer(ide_drive_t *drive, int write, int len)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 buf[4] = { 0 };
+
+ while (len > 0) {
+ if (write)
+ hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
+ else
+ hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
+ len -= 4;
+ }
+}
+EXPORT_SYMBOL_GPL(ide_pad_transfer);
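ide_pad_transfer() is also what the error path earlier in this patch uses to drain a confused drive: with mult_count = 16 it pads 16 * 512 = 8192 bytes. The loop moves data in 4-byte chunks, min(4, len) bounds the final short chunk, and because len -= 4 can go negative the loop tests len > 0 rather than len != 0. A standalone model of the chunking:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static int pad_chunks(int len)
    {
            int n = 0;

            while (len > 0) {
                    int chunk = MIN(4, len); /* last chunk may be short */

                    (void)chunk;             /* a real pad would move it */
                    len -= 4;                /* may go negative; loop exits */
                    n++;
            }
            return n;
    }

    int main(void)
    {
            printf("%d %d\n", pad_chunks(512), pad_chunks(10)); /* 128 3 */
            return 0;
    }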