X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Faio.c;h=f658441d5666c9d108f8ec337b2e0909686723bb;hb=589dc2602f2a1b7fa5e59b90f548af189f128d77;hp=55991e4132a7f30f778a7f86862b8992c59bafca;hpb=dee11c2364f51cac53df17d742a0c69097e29a4e;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/aio.c b/fs/aio.c
index 55991e4..f658441 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -30,6 +30,7 @@
 #include <linux/highmem.h>
 #include <linux/workqueue.h>
 #include <linux/security.h>
+#include <linux/eventfd.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -68,10 +69,8 @@ static void aio_queue_work(struct kioctx *);
  */
 static int __init aio_setup(void)
 {
-	kiocb_cachep = kmem_cache_create("kiocb", sizeof(struct kiocb),
-				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
-	kioctx_cachep = kmem_cache_create("kioctx", sizeof(struct kioctx),
-				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
 	aio_wq = create_workqueue("aio");
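
The aio_setup() hunk above replaces two open-coded kmem_cache_create() calls with the KMEM_CACHE() helper, which derives the cache name, object size, and alignment from the struct type itself, so the three cannot drift apart. As a sketch of the idea (the helper lived in <linux/slab.h>; the exact trailing NULL constructor/destructor arguments varied between releases of this era):

	/* Sketch of the KMEM_CACHE() helper, circa 2.6.22: build a cache whose
	 * name, object size and alignment all come from one struct definition. */
	#define KMEM_CACHE(__struct, __flags) \
		kmem_cache_create(#__struct, sizeof(struct __struct), \
				  __alignof__(struct __struct), (__flags), NULL, NULL)
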
@@ -132,11 +131,10 @@ static int aio_setup_ring(struct kioctx *ctx)
 	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
 	down_write(&ctx->mm->mmap_sem);
 	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
-				  PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
+				  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
 				  0);
 	if (IS_ERR((void *)info->mmap_base)) {
 		up_write(&ctx->mm->mmap_sem);
-		printk("mmap err: %ld\n", -info->mmap_base);
 		info->mmap_size = 0;
 		aio_free_ring(ctx);
 		return -EAGAIN;
@@ -193,6 +191,43 @@ static int aio_setup_ring(struct kioctx *ctx)
 	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 } while(0)
 
+
+/* __put_ioctx
+ *	Called when the last user of an aio context has gone away,
+ *	and the struct needs to be freed.
+ */
+static void __put_ioctx(struct kioctx *ctx)
+{
+	unsigned nr_events = ctx->max_reqs;
+
+	BUG_ON(ctx->reqs_active);
+
+	cancel_delayed_work(&ctx->wq);
+	cancel_work_sync(&ctx->wq.work);
+	aio_free_ring(ctx);
+	mmdrop(ctx->mm);
+	ctx->mm = NULL;
+	pr_debug("__put_ioctx: freeing %p\n", ctx);
+	kmem_cache_free(kioctx_cachep, ctx);
+
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
+}
+
+#define get_ioctx(kioctx) do {						\
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
+	atomic_inc(&(kioctx)->users);					\
+} while (0)
+#define put_ioctx(kioctx) do {						\
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
+	if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
+		__put_ioctx(kioctx);					\
+} while (0)
+
 /* ioctx_alloc
  *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
  */
@@ -211,11 +246,10 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	if ((unsigned long)nr_events > aio_max_nr)
 		return ERR_PTR(-EAGAIN);
 
-	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
+	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
 	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
-	memset(ctx, 0, sizeof(*ctx));
 	ctx->max_reqs = nr_events;
 	mm = ctx->mm = current->mm;
 	atomic_inc(&mm->mm_count);
@@ -243,7 +277,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	if (ctx->max_reqs == 0)
 		goto out_cleanup;
 
-	/* now link into global list.  kludge.  FIXME */
+	/* now link into global list. */
 	write_lock(&mm->ioctx_list_lock);
 	ctx->next = mm->ioctx_list;
 	mm->ioctx_list = ctx;
@@ -306,7 +340,7 @@ static void wait_for_all_aios(struct kioctx *ctx)
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	while (ctx->reqs_active) {
 		spin_unlock_irq(&ctx->ctx_lock);
-		schedule();
+		io_schedule();
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		spin_lock_irq(&ctx->ctx_lock);
 	}
@@ -320,13 +354,13 @@ out:
 /* wait_on_sync_kiocb:
  *	Waits on the given sync kiocb to complete.
  */
-ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
+ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
 {
 	while (iocb->ki_users) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (!iocb->ki_users)
 			break;
-		schedule();
+		io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
 	return iocb->ki_user_data;
@@ -339,7 +373,7 @@ ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
  * go away, they will call put_ioctx and release any pinned memory
  * associated with the request (held via struct page * references).
  */
-void fastcall exit_aio(struct mm_struct *mm)
+void exit_aio(struct mm_struct *mm)
 {
 	struct kioctx *ctx = mm->ioctx_list;
 	mm->ioctx_list = NULL;
@@ -350,10 +384,9 @@ void fastcall exit_aio(struct mm_struct *mm)
 		wait_for_all_aios(ctx);
 		/*
-		 * this is an overkill, but ensures we don't leave
-		 * the ctx on the aio_wq
+		 * Ensure we don't leave the ctx on the aio_wq
 		 */
-		flush_workqueue(aio_wq);
+		cancel_work_sync(&ctx->wq.work);
 
 		if (1 != atomic_read(&ctx->users))
 			printk(KERN_DEBUG
@@ -365,32 +398,6 @@ void fastcall exit_aio(struct mm_struct *mm)
 	}
 }
 
-/* __put_ioctx
- *	Called when the last user of an aio context has gone away,
- *	and the struct needs to be freed.
- */
-void fastcall __put_ioctx(struct kioctx *ctx)
-{
-	unsigned nr_events = ctx->max_reqs;
-
-	BUG_ON(ctx->reqs_active);
-
-	cancel_delayed_work(&ctx->wq);
-	flush_workqueue(aio_wq);
-	aio_free_ring(ctx);
-	mmdrop(ctx->mm);
-	ctx->mm = NULL;
-	pr_debug("__put_ioctx: freeing %p\n", ctx);
-	kmem_cache_free(kioctx_cachep, ctx);
-
-	if (nr_events) {
-		spin_lock(&aio_nr_lock);
-		BUG_ON(aio_nr - nr_events > aio_nr);
-		aio_nr -= nr_events;
-		spin_unlock(&aio_nr_lock);
-	}
-}
-
 /* aio_get_req
  *	Allocate a slot for an aio request.  Increments the users count
  *	of the kioctx so that the kioctx stays around until all requests are
@@ -401,8 +408,7 @@ void fastcall __put_ioctx(struct kioctx *ctx)
  * This prevents races between the aio code path referencing the
  * req (after submitting it) and aio_complete() freeing the req.
  */
-static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
-static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
+static struct kiocb *__aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req = NULL;
 	struct aio_ring *ring;
@@ -422,6 +428,7 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
 	req->private = NULL;
 	req->ki_iovec = NULL;
 	INIT_LIST_HEAD(&req->ki_run_list);
+	req->ki_eventfd = ERR_PTR(-EINVAL);
 
 	/* Check if the completion queue has enough free space to
 	 * accept an event from this io.
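
The new ki_eventfd field above is initialized to ERR_PTR(-EINVAL) rather than NULL, so later code can treat IS_ERR() as the "no eventfd attached" test and fput() only a real file pointer. For readers unfamiliar with the idiom, this is roughly how <linux/err.h> encodes an errno inside a pointer (a sketch, not the verbatim header):

	/* Sketch of the ERR_PTR()/IS_ERR() idiom from <linux/err.h>: the top
	 * 4095 values of the address space are never valid kernel pointers,
	 * so they can carry a negative errno instead. */
	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x)	((x) >= (unsigned long)-MAX_ERRNO)

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline long IS_ERR(const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}
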
@@ -463,6 +470,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
 	assert_spin_locked(&ctx->ctx_lock);
 
+	if (!IS_ERR(req->ki_eventfd))
+		fput(req->ki_eventfd);
 	if (req->ki_dtor)
 		req->ki_dtor(req);
 	if (req->ki_iovec != &req->ki_inline_vec)
@@ -503,8 +512,8 @@ static void aio_fput_routine(struct work_struct *data)
  */
 static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 {
-	dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
-		req, atomic_read(&req->ki_filp->f_count));
+	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
+		req, atomic_long_read(&req->ki_filp->f_count));
 
 	assert_spin_locked(&ctx->ctx_lock);
@@ -519,7 +528,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	/* Must be done under the lock to serialise against cancellation.
 	 * Call this aio_fput as it duplicates fput via the fput_work.
 	 */
-	if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
+	if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
 		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
@@ -534,7 +543,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
  *	Returns true if this put was the last user of the kiocb,
  *	false if the request is still in use.
  */
-int fastcall aio_put_req(struct kiocb *req)
+int aio_put_req(struct kiocb *req)
 {
 	struct kioctx *ctx = req->ki_ctx;
 	int ret;
@@ -544,10 +553,7 @@ int fastcall aio_put_req(struct kiocb *req)
 	return ret;
 }
 
-/* Lookup an ioctx id.  ioctx_list is lockless for reads.
- * FIXME: this is O(n) and is only suitable for development.
- */
-struct kioctx *lookup_ioctx(unsigned long ctx_id)
+static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
 	struct kioctx *ioctx;
 	struct mm_struct *mm;
@@ -580,15 +586,10 @@ static void use_mm(struct mm_struct *mm)
 	struct task_struct *tsk = current;
 
 	task_lock(tsk);
-	tsk->flags |= PF_BORROWED_MM;
 	active_mm = tsk->active_mm;
 	atomic_inc(&mm->mm_count);
 	tsk->mm = mm;
 	tsk->active_mm = mm;
-	/*
-	 * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise
-	 * it won't work. Update it accordingly if you change it here
-	 */
 	switch_mm(active_mm, mm, tsk);
 	task_unlock(tsk);
@@ -608,7 +609,6 @@ static void unuse_mm(struct mm_struct *mm)
 	struct task_struct *tsk = current;
 
 	task_lock(tsk);
-	tsk->flags &= ~PF_BORROWED_MM;
 	tsk->mm = NULL;
 	/* active_mm is still 'mm' */
 	enter_lazy_tlb(mm, tsk);
@@ -711,18 +711,9 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
 	/*
 	 * Now we are all set to call the retry method in async
-	 * context. By setting this thread's io_wait context
-	 * to point to the wait queue entry inside the currently
-	 * running iocb for the duration of the retry, we ensure
-	 * that async notification wakeups are queued by the
-	 * operation instead of blocking waits, and when notified,
-	 * cause the iocb to be kicked for continuation (through
-	 * the aio_wake_function callback).
+	 * context.
 	 */
-	BUG_ON(current->io_wait != NULL);
-	current->io_wait = &iocb->ki_wait;
 	ret = retry(iocb);
-	current->io_wait = NULL;
 
 	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
 		BUG_ON(!list_empty(&iocb->ki_wait.task_list));
@@ -903,7 +894,7 @@ static void try_queue_kicked_iocb(struct kiocb *iocb)
  *      The retry is usually executed by aio workqueue
  *      threads (See aio_kick_handler).
  */
-void fastcall kick_iocb(struct kiocb *iocb)
+void kick_iocb(struct kiocb *iocb)
 {
 	/* sync iocbs are easy: they can only ever be executing from a
 	 * single context. */
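
The use_mm()/unuse_mm() hunks above drop the PF_BORROWED_MM flag, but the pair remains how the aio workqueue thread temporarily adopts the submitting process's address space so that retries can touch user buffers. For context, their caller in this file, aio_kick_handler(), follows roughly this shape (a paraphrased sketch, not part of this diff; see fs/aio.c for the real body):

	/* Rough shape of aio_kick_handler(), the workqueue callback that
	 * borrows the submitting process's mm around the iocb retries. */
	static void aio_kick_handler(struct work_struct *work)
	{
		struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
		mm_segment_t oldfs = get_fs();
		struct mm_struct *mm;
		int requeue;

		set_fs(USER_DS);
		use_mm(ctx->mm);		/* adopt the submitter's mm */
		spin_lock_irq(&ctx->ctx_lock);
		requeue = __aio_run_iocbs(ctx);	/* run each runnable iocb once */
		mm = ctx->mm;
		spin_unlock_irq(&ctx->ctx_lock);
		unuse_mm(mm);			/* and drop it again */
		set_fs(oldfs);
		/* more iocbs may have become runnable meanwhile: requeue */
		if (requeue)
			queue_delayed_work(aio_wq, &ctx->wq, 0);
	}
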
@@ -922,7 +913,7 @@ EXPORT_SYMBOL(kick_iocb);
  *	Returns true if this is the last user of the request.  The
  *	only other user of the request can be the cancellation code.
  */
-int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
+int aio_complete(struct kiocb *iocb, long res, long res2)
 {
 	struct kioctx	*ctx = iocb->ki_ctx;
 	struct aio_ring_info	*info;
@@ -995,10 +986,27 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 	kunmap_atomic(ring, KM_IRQ1);
 
 	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
+
+	/*
+	 * Check if the user asked us to deliver the result through an
+	 * eventfd. The eventfd_signal() function is safe to be called
+	 * from IRQ context.
+	 */
+	if (!IS_ERR(iocb->ki_eventfd))
+		eventfd_signal(iocb->ki_eventfd, 1);
+
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	ret = __aio_put_req(ctx, iocb);
 
+	/*
+	 * We have to order our ring_info tail store above and test
+	 * of the wait list below outside the wait lock.  This is
+	 * like in wake_up_bit() where clearing a bit has to be
+	 * ordered with the unlocked test.
+	 */
+	smp_mb();
+
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
@@ -1064,9 +1072,7 @@ static void timeout_func(unsigned long data)
 
 static inline void init_timeout(struct aio_timeout *to)
 {
-	init_timer(&to->timer);
-	to->timer.data = (unsigned long)to;
-	to->timer.function = timeout_func;
+	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
 	to->timed_out = 0;
 	to->p = current;
 }
@@ -1160,10 +1166,18 @@ retry:
 			break;
 		if (min_nr <= i)
 			break;
-		ret = 0;
+		if (unlikely(ctx->dead)) {
+			ret = -EINVAL;
+			break;
+		}
 		if (to.timed_out)	/* Only check after read evt */
 			break;
-		schedule();
+		/* Try to only show up in io wait if there are ops
+		 * in flight */
+		if (ctx->reqs_active)
+			io_schedule();
+		else
+			schedule();
 		if (signal_pending(tsk)) {
 			ret = -EINTR;
 			break;
@@ -1191,6 +1205,7 @@ retry:
 	if (timeout)
 		clear_timeout(&to);
 out:
+	destroy_timer_on_stack(&to.timer);
 	return i ? i : ret;
 }
@@ -1220,6 +1235,13 @@ static void io_destroy(struct kioctx *ioctx)
 	aio_cancel_all(ioctx);
 	wait_for_all_aios(ioctx);
+
+	/*
+	 * Wake up any waiters.  The setting of ctx->dead must be seen
+	 * by other CPUs at this point.  Right now, we rely on the
+	 * locking done by the above calls to ensure this consistency.
+	 */
+	wake_up(&ioctx->wait);
 	put_ioctx(ioctx);	/* once for the lookup */
 }
@@ -1327,6 +1349,10 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
 		opcode = IOCB_CMD_PWRITEV;
 	}
 
+	/* This matches the pread()/pwrite() logic */
+	if (iocb->ki_pos < 0)
+		return -EINVAL;
+
 	do {
 		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
 			    iocb->ki_nr_segs - iocb->ki_cur_seg,
@@ -1345,6 +1371,13 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
 	if ((ret == 0) || (iocb->ki_left == 0))
 		ret = iocb->ki_nbytes - iocb->ki_left;
 
+	/* If we managed to write some out we return that, rather than
+	 * the eventual error. */
+	if (opcode == IOCB_CMD_PWRITEV
+	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
+	    && iocb->ki_nbytes - iocb->ki_left)
+		ret = iocb->ki_nbytes - iocb->ki_left;
+
 	return ret;
 }
@@ -1501,10 +1534,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
  *	Simply triggers a retry of the operation via kick_iocb.
  *
  *	This callback is specified in the wait queue entry in
- *	a kiocb	(current->io_wait points to this wait queue
- *	entry when an aio operation executes; it is used
- *	instead of a synchronous wait when an i/o blocking
- *	condition is encountered during aio).
+ *	a kiocb.
 *
 * Note:
 * This routine is executed with the wait queue lock held.
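
The init_timeout()/read_events() hunks above move the timeout timer to setup_timer_on_stack() and pair it with destroy_timer_on_stack() on exit, which is what lets CONFIG_DEBUG_OBJECTS_TIMERS track a stack-allocated timer correctly. In isolation, the lifecycle looks like this (a minimal sketch with a hypothetical callback, not code from this file):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	/* Hypothetical callback, for illustration only. */
	static void my_timeout(unsigned long data)
	{
		/* runs in timer (softirq) context when the timer expires */
	}

	static void wait_with_deadline(void)
	{
		struct timer_list t;

		/* On-stack timers must use the *_on_stack variants so the
		 * debugobjects code knows the object lives on the stack. */
		setup_timer_on_stack(&t, my_timeout, 0);
		t.expires = jiffies + HZ;	/* one second from now */
		add_timer(&t);

		/* ... sleep until woken or timed out ... */

		del_singleshot_timer_sync(&t);	/* ensure it is not running */
		destroy_timer_on_stack(&t);	/* required pairing on return */
	}
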
@@ -1523,7 +1553,7 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
 	return 1;
 }
 
-int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 			 struct iocb *iocb)
 {
 	struct kiocb *req;
@@ -1531,8 +1561,7 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	ssize_t ret;
 
 	/* enforce forwards compatibility on users */
-	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2 ||
-		     iocb->aio_reserved3)) {
+	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
 		pr_debug("EINVAL: io_submit: reserve field set\n");
 		return -EINVAL;
 	}
@@ -1556,8 +1585,21 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		fput(file);
 		return -EAGAIN;
 	}
-
 	req->ki_filp = file;
+	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
+		/*
+		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
+		 * instance of the file* now. The file descriptor must be
+		 * an eventfd() fd, and will be signaled for each completed
+		 * event using the eventfd_signal() function.
+		 */
+		req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
+		if (IS_ERR(req->ki_eventfd)) {
+			ret = PTR_ERR(req->ki_eventfd);
+			goto out_put_req;
+		}
+	}
+
 	ret = put_user(req->ki_key, &user_iocb->aio_key);
 	if (unlikely(ret)) {
 		dprintk("EFAULT: aio_key\n");
@@ -1760,6 +1802,7 @@ asmlinkage long sys_io_getevents(aio_context_t ctx_id,
 		put_ioctx(ioctx);
 	}
 
+	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
 	return ret;
 }
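
Taken together, the ki_eventfd hunks implement IOCB_FLAG_RESFD: io_submit_one() pins the eventfd file at submission time and aio_complete() signals it once per completed event. A minimal userspace sketch of the resulting ABI follows (raw syscalls to avoid a libaio dependency; the file path and error handling are illustrative only):

	#include <linux/aio_abi.h>
	#include <sys/eventfd.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <fcntl.h>
	#include <string.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		aio_context_t ctx = 0;
		struct iocb cb;
		struct iocb *cbs[1] = { &cb };
		struct io_event ev;
		static char buf[4096];
		uint64_t count;
		int efd = eventfd(0, 0);
		int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */

		syscall(SYS_io_setup, 128, &ctx);

		memset(&cb, 0, sizeof(cb));
		cb.aio_fildes = fd;
		cb.aio_lio_opcode = IOCB_CMD_PREAD;
		cb.aio_buf = (uint64_t)(unsigned long)buf;
		cb.aio_nbytes = sizeof(buf);
		cb.aio_flags = IOCB_FLAG_RESFD;		/* route completion to efd */
		cb.aio_resfd = efd;

		syscall(SYS_io_submit, ctx, 1, cbs);

		/* Blocks until aio_complete() calls eventfd_signal(); the
		 * counter read is the number of completions since last read. */
		read(efd, &count, sizeof(count));
		syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
		printf("%llu completion(s), res=%lld\n",
		       (unsigned long long)count, (long long)ev.res);

		syscall(SYS_io_destroy, ctx);
		return 0;
	}

Because the eventfd is readable (and poll()able) whenever completions are pending, this flag is what lets io_getevents-style completion be multiplexed into an epoll/select loop alongside ordinary file descriptors.
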