/*
 *  linux/drivers/ide/ide-taskfile.c	Version 0.38	March 05, 2003
 *
 *  Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002	Klaus Smolin
 *					IBM Storage Technology Division
 *  Copyright (C) 2003-2004	Bartlomiej Zolnierkiewicz
 *
 *  The big the bad and the ugly.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
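
/*
 * PIO data helpers.  Word counts below are in 32-bit units (SECTOR_WORDS
 * per 512-byte sector).  Data is byte-swapped on the way in/out for
 * drives that record their data byte-swapped (drive->bswap).
 */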
static void ata_bswap_data (void *buffer, int wcount)
{
	u16 *p = buffer;

	while (wcount--) {
		*p = *p << 8 | *p >> 8; p++;
		*p = *p << 8 | *p >> 8; p++;
	}
}

static void taskfile_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
	HWIF(drive)->ata_input_data(drive, buffer, wcount);
	if (drive->bswap)
		ata_bswap_data(buffer, wcount);
}

static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
	if (drive->bswap) {
		ata_bswap_data(buffer, wcount);
		HWIF(drive)->ata_output_data(drive, buffer, wcount);
		ata_bswap_data(buffer, wcount);
	} else {
		HWIF(drive)->ata_output_data(drive, buffer, wcount);
	}
}
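
/*
 * ide_tf_load() writes the taskfile registers in LBA48 order: the hob
 * (high order byte) values go out first, then the low order bytes, since
 * an LBA48 device keeps the two most recently written values per register.
 * HIHI masks tf->device before it is merged with drive->select.all: 0xE0
 * keeps only the control bits for LBA48 commands, 0xEF additionally passes
 * the low nibble (CHS head / LBA bits 24-27) for 28-bit commands, and
 * flagged taskfiles use the byte unmasked (0xFF).
 */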
void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_taskfile *tf = &task->tf;
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG); /* clear nIEN */

	if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
		SELECT_MASK(drive, 0);

	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
		hwif->OUTW((tf->hob_data << 8) | tf->data, IDE_DATA_REG);

	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		hwif->OUTB(tf->hob_feature, IDE_FEATURE_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		hwif->OUTB(tf->hob_nsect, IDE_NSECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		hwif->OUTB(tf->hob_lbal, IDE_SECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		hwif->OUTB(tf->hob_lbam, IDE_LCYL_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		hwif->OUTB(tf->hob_lbah, IDE_HCYL_REG);

	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		hwif->OUTB(tf->feature, IDE_FEATURE_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		hwif->OUTB(tf->nsect, IDE_NSECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		hwif->OUTB(tf->lbal, IDE_SECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		hwif->OUTB(tf->lbam, IDE_LCYL_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		hwif->OUTB(tf->lbah, IDE_HCYL_REG);

	hwif->OUTB((tf->device & HIHI) | drive->select.all, IDE_SELECT_REG);
}

EXPORT_SYMBOL_GPL(ide_tf_load);
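
/*
 * Issue IDENTIFY DEVICE (or IDENTIFY PACKET DEVICE for non-disk media) as
 * a PIO-in taskfile and copy the 512-byte identify data into buf.
 */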
int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tf.nsect = 0x01;
	if (drive->media == ide_disk)
		args.tf.command = WIN_IDENTIFY;
	else
		args.tf.command = WIN_PIDENTIFY;
	args.command_type = IDE_DRIVE_TASK_IN;
	args.data_phase   = TASKFILE_IN;
	args.handler	  = &task_in_intr;
	return ide_raw_taskfile(drive, &args, buf);
}
static int inline task_dma_ok(ide_task_t *task)
{
	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		return 1;

	switch (task->tf.command) {
		case WIN_WRITEDMA_ONCE:
		case WIN_WRITEDMA:
		case WIN_WRITEDMA_EXT:
		case WIN_READDMA_ONCE:
		case WIN_READDMA:
		case WIN_READDMA_EXT:
		case WIN_IDENTIFY_DMA:
			return 1;
	}

	return 0;
}
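
/*
 * do_rw_taskfile() loads the taskfile and then dispatches the command:
 * PIO commands install task->handler (going through task->prehandler for
 * writes, which must push the first data block themselves), while
 * recognised DMA commands are handed to the hwif's dma_setup /
 * dma_exec_cmd / dma_start hooks.
 */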
ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct ide_taskfile *tf = &task->tf;

	ide_tf_load(drive, task);

	if (task->handler != NULL) {
		if (task->prehandler != NULL) {
			hwif->OUTBSYNC(drive, tf->command, IDE_COMMAND_REG);
			ndelay(400);	/* FIXME */
			return task->prehandler(drive, task->rq);
		}
		ide_execute_command(drive, tf->command, task->handler, WAIT_WORSTCASE, NULL);
		return ide_started;
	}

	if (task_dma_ok(task) && drive->using_dma && !hwif->dma_setup(drive)) {
		hwif->dma_exec_cmd(drive, tf->command);
		hwif->dma_start(drive);
		return ide_started;
	}

	return ide_stopped;
}
/*
 * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
 */
ide_startstop_t set_multmode_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT)) {
		drive->mult_count = drive->mult_req;
	} else {
		drive->mult_req = drive->mult_count = 0;
		drive->special.b.recalibrate = 1;
		(void) ide_dump_status(drive, "set_multmode", stat);
	}
	return ide_stopped;
}
/*
 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
 */
ide_startstop_t set_geometry_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	int retries = 5;
	u8 stat;

	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
		udelay(10);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	if (stat & (ERR_STAT|DRQ_STAT))
		return ide_error(drive, "set_geometry_intr", stat);

	BUG_ON(HWGROUP(drive)->handler != NULL);
	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}
/*
 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
 */
ide_startstop_t recal_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT))
		return ide_error(drive, "recal_intr", stat);
	return ide_stopped;
}
/*
 * Handler for commands without a data phase
 */
ide_startstop_t task_no_data_intr (ide_drive_t *drive)
{
	ide_task_t *args = HWGROUP(drive)->rq->special;
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	local_irq_enable_in_hardirq();
	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT)) {
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */
	}
	if (args)
		ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));

	return ide_stopped;
}
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	int retries;
	u8 stat;

	/*
	 * Last sector was transferred, wait until the drive is ready.
	 * This can take up to 10 usec, but we will wait max 1 ms
	 * (drive_cmd_intr() waits that long).
	 */
	for (retries = 0; retries < 100; retries++) {
		if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT)
			udelay(10);
		else
			break;
	}

	if (stat & BUSY_STAT)
		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

	return stat;
}
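
/*
 * Transfer a single 512-byte sector to/from the scatterlist segment that
 * hwif->cursg/cursg_ofs currently point at.  The page is kmapped
 * atomically so highmem pages work, and the cursor is advanced to the
 * next segment once the current one is exhausted.
 */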
static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct scatterlist *cursg = hwif->cursg;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long flags;
#endif
	unsigned int offset;
	u8 *buf;

	if (!cursg) {
		cursg = sg;
		hwif->cursg = sg;
	}

	page = sg_page(cursg);
	offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
	local_irq_save(flags);
#endif
	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

	hwif->nleft--;
	hwif->cursg_ofs++;

	if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
		hwif->cursg = sg_next(hwif->cursg);
		hwif->cursg_ofs = 0;
	}

	/* do the actual data transfer */
	if (write)
		taskfile_output_data(drive, buf, SECTOR_WORDS);
	else
		taskfile_input_data(drive, buf, SECTOR_WORDS);

	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
	local_irq_restore(flags);
#endif
}
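
/*
 * For READ/WRITE MULTIPLE, move up to mult_count sectors per DRQ block,
 * bounded by what is left in the request.
 */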
static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
{
	unsigned int nsect;

	nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
	while (nsect--)
		ide_pio_sector(drive, write);
}
static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
			      unsigned int write)
{
	if (rq->bio)	/* fs request */
		rq->errors = 0;

	touch_softlockup_watchdog();

	switch (drive->hwif->data_phase) {
	case TASKFILE_MULTI_IN:
	case TASKFILE_MULTI_OUT:
		ide_pio_multi(drive, write);
		break;
	default:
		ide_pio_sector(drive, write);
		break;
	}
}
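
/*
 * On a failed PIO transfer, complete the part of the request that did
 * reach the medium before reporting the error: sectors counts what has
 * been moved through the data register, minus the block that was still
 * in flight when the fault was detected.
 */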
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
				  const char *s, u8 stat)
{
	if (rq->bio) {
		ide_hwif_t *hwif = drive->hwif;
		int sectors = hwif->nsect - hwif->nleft;

		switch (hwif->data_phase) {
		case TASKFILE_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_OUT:
			sectors--;
			break;
		case TASKFILE_MULTI_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_MULTI_OUT:
			sectors -= drive->mult_count;
		default:
			break;
		}

		if (sectors > 0) {
			ide_driver_t *drv;

			drv = *(ide_driver_t **)rq->rq_disk->private_data;
			drv->end_request(drive, 1, sectors);
		}
	}
	return ide_error(drive, s, stat);
}
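
/*
 * Normal completion: flagged taskfiles finish through ide_end_drive_cmd()
 * so the caller gets the status/error registers back, fs requests finish
 * through their ide_driver_t's end_request(), and everything else falls
 * back to ide_end_request().
 */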
static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
	HWIF(drive)->cursg = NULL;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = rq->special;

		if (task->tf_flags & IDE_TFLAG_FLAGGED) {
			u8 err = drive->hwif->INB(IDE_ERROR_REG);
			ide_end_drive_cmd(drive, stat, err);
			return;
		}
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 1, rq->hard_nr_sectors);
	} else
		ide_end_request(drive, 1, rq->hard_nr_sectors);
}
/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
ide_startstop_t task_in_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->INB(IDE_STATUS_REG);

	/* new way for dealing with premature shared PCI interrupts */
	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
		if (stat & (ERR_STAT | DRQ_STAT))
			return task_error(drive, rq, __FUNCTION__, stat);
		/* No data yet, so wait for another IRQ. */
		ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
		return ide_started;
	}

	ide_pio_datablock(drive, rq, 0);

	/* If it was the last datablock check status and finish transfer. */
	if (!hwif->nleft) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_R_STAT))
			return task_error(drive, rq, __FUNCTION__, stat);
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}
EXPORT_SYMBOL(task_in_intr);
/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->INB(IDE_STATUS_REG);

	if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
		return task_error(drive, rq, __FUNCTION__, stat);

	/* Deal with unexpected ATA data phase. */
	if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
		return task_error(drive, rq, __FUNCTION__, stat);

	if (!hwif->nleft) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_pio_datablock(drive, rq, 1);
	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}
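
/*
 * PIO-out commands get no interrupt for the first data block, so
 * pre_task_out_intr() waits (up to WAIT_DRQ) for the drive to raise DRQ
 * and then pushes the first block from the issuing context.
 */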
ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, DATA_READY,
			  drive->bad_wstat, WAIT_DRQ)) {
		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
				drive->name,
				drive->hwif->data_phase ? "MULT" : "",
				drive->addressing ? "_EXT" : "");
		return startstop;
	}

	if (!drive->unmask)
		local_irq_disable();

	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
	ide_pio_datablock(drive, rq, 1);

	return ide_started;
}
EXPORT_SYMBOL(pre_task_out_intr);
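
/*
 * Wrap an ide_task_t in a REQ_TYPE_ATA_TASKFILE request and wait for it.
 * When data_size is 0 the transfer length is taken from the nsect/hob_nsect
 * registers; otherwise it is data_size rounded down to whole sectors.
 */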
static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
{
	struct request rq;

	memset(&rq, 0, sizeof(rq));
	rq.ref_count = 1;
	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq.buffer = buf;

	/*
	 * (ks) We currently transfer only whole sectors.
	 * This is sufficient for now.  But it would be great
	 * if we could find a solution to transfer any size,
	 * to support special commands like READ LONG.
	 */
	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
		if (data_size == 0)
			rq.nr_sectors = (args->tf.hob_nsect << 8) | args->tf.nsect;
		else
			rq.nr_sectors = data_size / SECTOR_SIZE;

		if (!rq.nr_sectors) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			return -EFAULT;
		}

		rq.hard_nr_sectors = rq.nr_sectors;
		rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors;

		if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
			rq.cmd_flags |= REQ_RW;
	}

	rq.special = args;
	args->rq = &rq;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf)
{
	return ide_diag_taskfile(drive, args, 0, buf);
}

EXPORT_SYMBOL(ide_raw_taskfile);
int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
{
	task->command_type = IDE_DRIVE_TASK_NO_DATA;
	task->data_phase   = TASKFILE_NO_DATA;
	task->handler      = task_no_data_intr;

	return ide_raw_taskfile(drive, task, NULL);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
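
/*
 * HDIO_DRIVE_TASKFILE: the user buffer starts with an ide_task_request_t
 * header, immediately followed by out_size bytes of write data and then
 * in_size bytes of read data.  Both sizes are capped at 64 KiB here.
 */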
#ifdef CONFIG_IDE_TASK_IOCTL
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	ide_task_request_t	*req_task;
	ide_task_t		args;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	unsigned int taskin	= 0;
	unsigned int taskout	= 0;
	u8 io_32bit		= drive->io_32bit;
	char __user *buf	= (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = req_task->out_size;
	taskin  = req_task->in_size;

	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&args, 0, sizeof(ide_task_t));

	memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
	args.tf_in_flags  = req_task->in_flags;
	args.data_phase   = req_task->data_phase;
	args.command_type = req_task->req_cmd;

	if (req_task->out_flags.all) {
		args.tf_flags |= IDE_TFLAG_FLAGGED;

		if (req_task->out_flags.b.data)
			args.tf_flags |= IDE_TFLAG_OUT_DATA;

		if (req_task->out_flags.b.nsector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
		if (req_task->out_flags.b.sector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
		if (req_task->out_flags.b.lcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
		if (req_task->out_flags.b.hcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

		if (req_task->out_flags.b.error_feature)
			args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
		if (req_task->out_flags.b.nsector)
			args.tf_flags |= IDE_TFLAG_OUT_NSECT;
		if (req_task->out_flags.b.sector)
			args.tf_flags |= IDE_TFLAG_OUT_LBAL;
		if (req_task->out_flags.b.lcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAM;
		if (req_task->out_flags.b.hcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAH;
	}

	drive->io_32bit = 0;
	switch(req_task->data_phase) {
		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			break;
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			break;
		case TASKFILE_MULTI_OUT:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Write " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
				err = -EPERM;
				goto abort;
			}
			/* fall through */
		case TASKFILE_OUT:
			args.prehandler = &pre_task_out_intr;
			args.handler = &task_out_intr;
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			break;
		case TASKFILE_MULTI_IN:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Read failure " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
				err = -EPERM;
				goto abort;
			}
			/* fall through */
		case TASKFILE_IN:
			args.handler = &task_in_intr;
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			break;
		case TASKFILE_NO_DATA:
			args.handler = &task_no_data_intr;
			err = ide_diag_taskfile(drive, &args, 0, NULL);
			break;
		default:
			err = -EFAULT;
			goto abort;
	}

	memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);
	req_task->in_flags = args.tf_in_flags;

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	drive->io_32bit = io_32bit;

	return err;
}
#endif
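
/*
 * Helper for HDIO_DRIVE_CMD: buf[0..3] carry cmd/nsect/feature/sectors on
 * entry, followed by room for sectors * 512 bytes of data returned by the
 * drive; completion status is filled in by the drive_cmd path.
 */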
int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
{
	struct request rq;
	u8 buffer[4];

	if (!buf)
		buf = buffer;
	memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors);
	ide_init_drive_cmd(&rq);
	rq.buffer = buf;
	*buf++ = cmd;
	*buf++ = nsect;
	*buf++ = feature;
	*buf++ = sectors;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	u8 args[4], *argbuf = args;
	u8 xfer_rate = 0;
	int argsize = 4;
	ide_task_t tfargs;
	struct ide_taskfile *tf = &tfargs.tf;

	if (NULL == (void *) arg) {
		struct request rq;
		ide_init_drive_cmd(&rq);
		return ide_do_drive_cmd(drive, &rq, ide_wait);
	}

	if (copy_from_user(args, (void __user *)arg, 4))
		return -EFAULT;

	memset(&tfargs, 0, sizeof(ide_task_t));
	tf->feature = args[2];
	tf->nsect   = args[3];
	tf->lbal    = args[1];
	tf->command = args[0];

	if (args[3]) {
		argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
		argbuf = kzalloc(argsize, GFP_KERNEL);
		if (argbuf == NULL)
			return -ENOMEM;
	}
	if (set_transfer(drive, &tfargs)) {
		xfer_rate = args[1];
		if (ide_ata66_check(drive, &tfargs))
			goto abort;
	}

	err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf);

	if (!err && xfer_rate) {
		/* active-retuning-calls future */
		ide_set_xfer_rate(drive, xfer_rate);
		ide_driveid_update(drive);
	}
abort:
	if (copy_to_user((void __user *)arg, argbuf, argsize))
		err = -EFAULT;
	if (argsize > 4)
		kfree(argbuf);
	return err;
}
static int ide_wait_cmd_task(ide_drive_t *drive, u8 *buf)
{
	struct request rq;

	ide_init_drive_cmd(&rq);
	rq.cmd_type = REQ_TYPE_ATA_TASK;
	rq.buffer = buf;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int err = 0;
	u8 args[7], *argbuf = args;
	int argsize = 7;

	if (copy_from_user(args, p, 7))
		return -EFAULT;
	err = ide_wait_cmd_task(drive, argbuf);
	if (copy_to_user(p, argbuf, argsize))
		err = -EFAULT;
	return err;
}
/*
 * NOTICE: These are additions from IBM to provide a discrete interface
 * for selective task register access operations.  Nice JOB Klaus!!!
 * Glad to be able to work and co-develop this with you and IBM.
 */
ide_startstop_t flagged_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n", drive->name);
			return ide_stopped;
		}
	}

	/*
	 * (ks) Check the taskfile in flags.
	 * If set, then execute as they are defined.
	 * If not set, fall back to the default settings:
	 *	read all taskfile registers (except data),
	 *	read the hob registers (sector, nsector, lcyl, hcyl).
	 */
	if (task->tf_in_flags.all == 0) {
		task->tf_in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->addressing == 1)
			task->tf_in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	return do_rw_taskfile(drive, task);
}