X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fdirect-io.c;h=8b10b87dc01a294845aac2afd1a2d22ae5a269b2;hb=7663dacd926584093dfc350892792054692b6cb3;hp=b296942ff7d5865cc90d107edf6823cc9bd674d8;hpb=17a7b1d74b1207f8f1af40b5d184989076d08f8b;p=safe%2Fjmp%2Flinux-2.6 diff --git a/fs/direct-io.c b/fs/direct-io.c index b296942..8b10b87 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -5,11 +5,11 @@ * * O_DIRECT * - * 04Jul2002 akpm@zip.com.au + * 04Jul2002 Andrew Morton * Initial version * 11Sep2002 janetinc@us.ibm.com * added readv/writev support. - * 29Oct2002 akpm@zip.com.au + * 29Oct2002 Andrew Morton * rewrote bio_add_page() support. * 30Oct2002 pbadari@us.ibm.com * added support for non-aligned IO. @@ -122,8 +122,7 @@ struct dio { /* BIO completion state */ spinlock_t bio_lock; /* protects BIO fields below */ - int bio_count; /* nr bios to be completed */ - int bios_in_flight; /* nr bios in flight */ + unsigned long refcount; /* direct_io_worker() and bios */ struct bio *bio_list; /* singly linked via bi_private */ struct task_struct *waiter; /* waiting task (NULL if none) */ @@ -151,20 +150,14 @@ static int dio_refill_pages(struct dio *dio) int nr_pages; nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES); - down_read(¤t->mm->mmap_sem); - ret = get_user_pages( - current, /* Task for fault acounting */ - current->mm, /* whose pages? */ + ret = get_user_pages_fast( dio->curr_user_address, /* Where from? */ nr_pages, /* How many pages? */ dio->rw == READ, /* Write to memory? */ - 0, /* force (?) */ - &dio->pages[0], - NULL); /* vmas */ - up_read(¤t->mm->mmap_sem); + &dio->pages[0]); /* Put results here */ if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) { - struct page *page = ZERO_PAGE(dio->curr_user_address); + struct page *page = ZERO_PAGE(0); /* * A memory fault, but the filesystem has some outstanding * mapped blocks. We need to use those blocks up to avoid @@ -227,6 +220,15 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret) { ssize_t transferred = 0; + /* + * AIO submission can race with bio completion to get here while + * expecting to have the last io completed by bio completion. + * In that case -EIOCBQUEUED is in fact not an error we want + * to preserve through this call. + */ + if (ret == -EIOCBQUEUED) + ret = 0; + if (dio->result) { transferred = dio->result; @@ -252,64 +254,30 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret) return ret; } -/* - * Called when a BIO has been processed. If the count goes to zero then IO is - * complete and we can signal this to the AIO layer. - */ -static void finished_one_bio(struct dio *dio) -{ - unsigned long flags; - - spin_lock_irqsave(&dio->bio_lock, flags); - if (dio->bio_count == 1) { - if (dio->is_async) { - int ret; - - /* - * Last reference to the dio is going away. - * Drop spinlock and complete the DIO. 
- */ - spin_unlock_irqrestore(&dio->bio_lock, flags); - - ret = dio_complete(dio, dio->iocb->ki_pos, 0); - - /* Complete AIO later if falling back to buffered i/o */ - if (dio->result == dio->size || - ((dio->rw == READ) && dio->result)) { - aio_complete(dio->iocb, ret, 0); - kfree(dio); - return; - } else { - /* - * Falling back to buffered - */ - spin_lock_irqsave(&dio->bio_lock, flags); - dio->bio_count--; - if (dio->waiter) - wake_up_process(dio->waiter); - spin_unlock_irqrestore(&dio->bio_lock, flags); - return; - } - } - } - dio->bio_count--; - spin_unlock_irqrestore(&dio->bio_lock, flags); -} - static int dio_bio_complete(struct dio *dio, struct bio *bio); /* * Asynchronous IO callback. */ -static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error) +static void dio_bio_end_aio(struct bio *bio, int error) { struct dio *dio = bio->bi_private; - - if (bio->bi_size) - return 1; + unsigned long remaining; + unsigned long flags; /* cleanup the bio */ dio_bio_complete(dio, bio); - return 0; + + spin_lock_irqsave(&dio->bio_lock, flags); + remaining = --dio->refcount; + if (remaining == 1 && dio->waiter) + wake_up_process(dio->waiter); + spin_unlock_irqrestore(&dio->bio_lock, flags); + + if (remaining == 0) { + int ret = dio_complete(dio, dio->iocb->ki_pos, 0); + aio_complete(dio->iocb, ret, 0); + kfree(dio); + } } /* @@ -319,22 +287,17 @@ static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error) * During I/O bi_private points at the dio. After I/O, bi_private is used to * implement a singly-linked list of completed BIOs, at dio->bio_list. */ -static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error) +static void dio_bio_end_io(struct bio *bio, int error) { struct dio *dio = bio->bi_private; unsigned long flags; - if (bio->bi_size) - return 1; - spin_lock_irqsave(&dio->bio_lock, flags); bio->bi_private = dio->bio_list; dio->bio_list = bio; - dio->bios_in_flight--; - if (dio->waiter && dio->bios_in_flight == 0) + if (--dio->refcount == 1 && dio->waiter) wake_up_process(dio->waiter); spin_unlock_irqrestore(&dio->bio_lock, flags); - return 0; } static int @@ -344,8 +307,6 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev, struct bio *bio; bio = bio_alloc(GFP_KERNEL, nr_vecs); - if (bio == NULL) - return -ENOMEM; bio->bi_bdev = bdev; bio->bi_sector = first_sector; @@ -362,6 +323,8 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev, * In the AIO read case we speculatively dirty the pages before starting IO. * During IO completion, any of these pages which happen to have been written * back will be redirtied by bio_check_pages_dirty(). + * + * bios hold a dio reference between submit_bio and ->end_io. */ static void dio_bio_submit(struct dio *dio) { @@ -369,12 +332,14 @@ static void dio_bio_submit(struct dio *dio) unsigned long flags; bio->bi_private = dio; + spin_lock_irqsave(&dio->bio_lock, flags); - dio->bio_count++; - dio->bios_in_flight++; + dio->refcount++; spin_unlock_irqrestore(&dio->bio_lock, flags); + if (dio->is_async && dio->rw == READ) bio_set_pages_dirty(bio); + submit_bio(dio->rw, bio); dio->bio = NULL; @@ -391,27 +356,37 @@ static void dio_cleanup(struct dio *dio) } /* - * Wait for the next BIO to complete. Remove it and return it. + * Wait for the next BIO to complete. Remove it and return it. NULL is + * returned once all BIOs have been completed. This must only be called once + * all bios have been issued so that dio->refcount can only decrease. 
+ * This requires that the caller hold a reference on the dio.
  */
 static struct bio *dio_await_one(struct dio *dio)
 {
 	unsigned long flags;
-	struct bio *bio;
+	struct bio *bio = NULL;
 
 	spin_lock_irqsave(&dio->bio_lock, flags);
-	while (dio->bio_list == NULL) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (dio->bio_list == NULL) {
-			dio->waiter = current;
-			spin_unlock_irqrestore(&dio->bio_lock, flags);
-			io_schedule();
-			spin_lock_irqsave(&dio->bio_lock, flags);
-			dio->waiter = NULL;
-		}
-		set_current_state(TASK_RUNNING);
+
+	/*
+	 * Wait as long as the list is empty and there are bios in flight.  bio
+	 * completion drops the count, maybe adds to the list, and wakes while
+	 * holding the bio_lock so we don't need set_current_state()'s barrier
+	 * and can call it after testing our condition.
+	 */
+	while (dio->refcount > 1 && dio->bio_list == NULL) {
+		__set_current_state(TASK_UNINTERRUPTIBLE);
+		dio->waiter = current;
+		spin_unlock_irqrestore(&dio->bio_lock, flags);
+		io_schedule();
+		/* wake up sets us TASK_RUNNING */
+		spin_lock_irqsave(&dio->bio_lock, flags);
+		dio->waiter = NULL;
+	}
+	if (dio->bio_list) {
+		bio = dio->bio_list;
+		dio->bio_list = bio->bi_private;
 	}
-	bio = dio->bio_list;
-	dio->bio_list = bio->bi_private;
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 	return bio;
 }
@@ -440,25 +415,24 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
 		}
 		bio_put(bio);
 	}
-	finished_one_bio(dio);
 	return uptodate ? 0 : -EIO;
 }
 
 /*
- * Wait on and process all in-flight BIOs.
+ * Wait on and process all in-flight BIOs.  This must only be called once
+ * all bios have been issued so that the refcount can only decrease.
+ * This just waits for all bios to make it through dio_bio_complete.  IO
+ * errors are propagated through dio->io_error and should be propagated via
+ * dio_complete().
  */
 static void dio_await_completion(struct dio *dio)
 {
-	/*
-	 * The bio_lock is not held for the read of bio_count.
-	 * This is ok since it is the dio_bio_complete() that changes
-	 * bio_count.
- */ - while (dio->bio_count) { - struct bio *bio = dio_await_one(dio); - /* io errors are propogated through dio->io_error */ - dio_bio_complete(dio, bio); - } + struct bio *bio; + do { + bio = dio_await_one(dio); + if (bio) + dio_bio_complete(dio, bio); + } while (bio); } /* @@ -781,7 +755,7 @@ static void dio_zero_block(struct dio *dio, int end) this_chunk_bytes = this_chunk_blocks << dio->blkbits; - page = ZERO_PAGE(dio->curr_user_address); + page = ZERO_PAGE(0); if (submit_page_section(dio, page, 0, this_chunk_bytes, dio->next_block_for_io)) return; @@ -876,7 +850,6 @@ static int do_direct_IO(struct dio *dio) do_holes: /* Handle holes */ if (!buffer_mapped(map_bh)) { - char *kaddr; loff_t i_size_aligned; /* AKPM: eargh, -ENOTBLK is a hack */ @@ -897,11 +870,8 @@ do_holes: page_cache_release(page); goto out; } - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + (block_in_page << blkbits), - 0, 1 << blkbits); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user(page, block_in_page << blkbits, + 1 << blkbits); dio->block_in_file++; block_in_page++; goto next_block; @@ -965,49 +935,28 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, struct dio *dio) { unsigned long user_addr; + unsigned long flags; int seg; ssize_t ret = 0; ssize_t ret2; size_t bytes; - dio->bio = NULL; dio->inode = inode; dio->rw = rw; dio->blkbits = blkbits; dio->blkfactor = inode->i_blkbits - blkbits; - dio->start_zero_done = 0; - dio->size = 0; dio->block_in_file = offset >> blkbits; - dio->blocks_available = 0; - dio->cur_page = NULL; - dio->boundary = 0; - dio->reap_counter = 0; dio->get_block = get_block; dio->end_io = end_io; - dio->map_bh.b_private = NULL; dio->final_block_in_bio = -1; dio->next_block_for_io = -1; - dio->page_errors = 0; - dio->io_error = 0; - dio->result = 0; dio->iocb = iocb; dio->i_size = i_size_read(inode); - /* - * BIO completion state. - * - * ->bio_count starts out at one, and we decrement it to zero after all - * BIOs are submitted. This to avoid the situation where a really fast - * (or synchronous) device could take the count to zero while we're - * still submitting BIOs. - */ - dio->bio_count = 1; - dio->bios_in_flight = 0; spin_lock_init(&dio->bio_lock); - dio->bio_list = NULL; - dio->waiter = NULL; + dio->refcount = 1; /* * In case of non-aligned buffers, we may need 2 more @@ -1015,8 +964,6 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, */ if (unlikely(dio->blkfactor)) dio->pages_in_io = 2; - else - dio->pages_in_io = 0; for (seg = 0; seg < nr_segs; seg++) { user_addr = (unsigned long)iov[seg].iov_base; @@ -1099,55 +1046,41 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, mutex_unlock(&dio->inode->i_mutex); /* - * OK, all BIOs are submitted, so we can decrement bio_count to truly - * reflect the number of to-be-processed BIOs. + * The only time we want to leave bios in flight is when a successful + * partial aio read or full aio write have been setup. In that case + * bio completion will call aio_complete. The only time it's safe to + * call aio_complete is when we return -EIOCBQUEUED, so we key on that. + * This had *better* be the only place that raises -EIOCBQUEUED. 
*/ - if (dio->is_async) { - int should_wait = 0; - - if (dio->result < dio->size && (rw & WRITE)) { - dio->waiter = current; - should_wait = 1; - } - if (ret == 0) - ret = dio->result; - finished_one_bio(dio); /* This can free the dio */ - if (should_wait) { - unsigned long flags; - /* - * Wait for already issued I/O to drain out and - * release its references to user-space pages - * before returning to fallback on buffered I/O - */ + BUG_ON(ret == -EIOCBQUEUED); + if (dio->is_async && ret == 0 && dio->result && + ((rw & READ) || (dio->result == dio->size))) + ret = -EIOCBQUEUED; - spin_lock_irqsave(&dio->bio_lock, flags); - set_current_state(TASK_UNINTERRUPTIBLE); - while (dio->bio_count) { - spin_unlock_irqrestore(&dio->bio_lock, flags); - io_schedule(); - spin_lock_irqsave(&dio->bio_lock, flags); - set_current_state(TASK_UNINTERRUPTIBLE); - } - spin_unlock_irqrestore(&dio->bio_lock, flags); - set_current_state(TASK_RUNNING); - kfree(dio); - } - } else { - finished_one_bio(dio); + if (ret != -EIOCBQUEUED) dio_await_completion(dio); - ret = dio_complete(dio, offset, ret); + /* + * Sync will always be dropping the final ref and completing the + * operation. AIO can if it was a broken operation described above or + * in fact if all the bios race to complete before we get here. In + * that case dio_complete() translates the EIOCBQUEUED into the proper + * return code that the caller will hand to aio_complete(). + * + * This is managed by the bio_lock instead of being an atomic_t so that + * completion paths can drop their ref and use the remaining count to + * decide to wake the submission path atomically. + */ + spin_lock_irqsave(&dio->bio_lock, flags); + ret2 = --dio->refcount; + spin_unlock_irqrestore(&dio->bio_lock, flags); - /* We could have also come here on an AIO file extend */ - if (!is_sync_kiocb(iocb) && (rw & WRITE) && - ret >= 0 && dio->result == dio->size) - /* - * For AIO writes where we have completed the - * i/o, we have to mark the the aio complete. - */ - aio_complete(iocb, ret, 0); + if (ret2 == 0) { + ret = dio_complete(dio, offset, ret); kfree(dio); - } + } else + BUG_ON(ret != -EIOCBQUEUED); + return ret; } @@ -1191,10 +1124,10 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, int acquire_i_mutex = 0; if (rw & WRITE) - rw = WRITE_SYNC; + rw = WRITE_ODIRECT; if (bdev) - bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev)); + bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev)); if (offset & blocksize_mask) { if (bdev) @@ -1218,7 +1151,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, } } - dio = kmalloc(sizeof(*dio), GFP_KERNEL); + dio = kzalloc(sizeof(*dio), GFP_KERNEL); retval = -ENOMEM; if (!dio) goto out; @@ -1274,6 +1207,19 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, retval = direct_io_worker(rw, iocb, inode, iov, offset, nr_segs, blkbits, get_block, end_io, dio); + /* + * In case of error extending write may have instantiated a few + * blocks outside i_size. Trim these off again for DIO_LOCKING. + * NOTE: DIO_NO_LOCK/DIO_OWN_LOCK callers have to handle this by + * it's own meaner. + */ + if (unlikely(retval < 0 && (rw & WRITE))) { + loff_t isize = i_size_read(inode); + + if (end > isize && dio_lock_type == DIO_LOCKING) + vmtruncate(inode, isize); + } + if (rw == READ && dio_lock_type == DIO_LOCKING) release_i_mutex = 0;
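
A note on the completion model this patch introduces: the old bio_count/bios_in_flight pair is replaced by a single refcount protected by bio_lock. direct_io_worker() starts the count at 1 on behalf of the submitter, dio_bio_submit() takes another reference for every bio it issues, and whichever path drops the count to zero (the submitting task for synchronous I/O, the last dio_bio_end_aio() call for AIO) runs dio_complete() and frees the dio. The sketch below is a minimal userspace analogue of that scheme, assuming pthreads stand in for bios and the submitting task; the names fake_dio, fake_dio_put, fake_bio_complete and NR_FAKE_BIOS are invented for illustration and are not kernel API.

/*
 * Minimal userspace sketch of the dio refcounting scheme (illustration only,
 * not kernel code).  The submitter holds one reference, each in-flight "bio"
 * holds another, and whoever drops the count to zero completes and frees.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NR_FAKE_BIOS 4

struct fake_dio {
	pthread_mutex_t lock;		/* stands in for dio->bio_lock */
	unsigned long refcount;		/* submitter + in-flight "bios" */
	long result;			/* bytes "transferred" */
};

/* Drop one reference; the caller that reaches zero completes and frees. */
static void fake_dio_put(struct fake_dio *dio)
{
	unsigned long remaining;

	pthread_mutex_lock(&dio->lock);
	remaining = --dio->refcount;
	pthread_mutex_unlock(&dio->lock);

	if (remaining == 0) {
		printf("last reference dropped, result=%ld\n", dio->result);
		pthread_mutex_destroy(&dio->lock);
		free(dio);
	}
}

/* Plays the role of the per-bio completion callback (dio_bio_end_aio). */
static void *fake_bio_complete(void *arg)
{
	struct fake_dio *dio = arg;

	usleep(1000);			/* pretend the device took a while */
	fake_dio_put(dio);		/* drop this "bio"'s reference */
	return NULL;
}

int main(void)
{
	pthread_t tids[NR_FAKE_BIOS];
	struct fake_dio *dio;
	int i;

	dio = calloc(1, sizeof(*dio));
	if (!dio)
		return 1;
	pthread_mutex_init(&dio->lock, NULL);
	dio->refcount = 1;		/* the submitter's own reference */

	for (i = 0; i < NR_FAKE_BIOS; i++) {
		/* take a reference before the "bio" goes in flight */
		pthread_mutex_lock(&dio->lock);
		dio->refcount++;
		dio->result += 512;
		pthread_mutex_unlock(&dio->lock);
		pthread_create(&tids[i], NULL, fake_bio_complete, dio);
	}

	/*
	 * Submission is finished, so drop the submitter's reference.  If all
	 * the "bios" have already completed this performs the final
	 * completion; otherwise the last completion callback will.
	 */
	fake_dio_put(dio);

	for (i = 0; i < NR_FAKE_BIOS; i++)
		pthread_join(tids[i], NULL);
	return 0;
}

The property the sketch shares with the patch is the initial reference held across submission: because the count starts at 1 and that reference is only dropped after the last "bio" has been issued, a really fast (or synchronous) device can never drive the count to zero while submission is still in progress, which is exactly the race the removed "bio_count starts out at one" comment describes and the new refcount preserves.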