X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Feventpoll.c;h=3817149919cb81fa298686f183f67e0c86fe1c50;hb=bef5bc2464517cbbf8f85f09b5ade46904afec9a;hp=336fdb8ed47bd0bdeb69e67becd1b011eb97079f;hpb=4f0989dbfa8d18dd17c32120aac1eb3e906a62a2;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 336fdb8..3817149 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -98,7 +98,7 @@ struct epoll_filefd {
 struct nested_call_node {
 	struct list_head llink;
 	void *cookie;
-	int cpu;
+	void *ctx;
 };
 
 /*
@@ -251,10 +251,10 @@ ctl_table epoll_table[] = {
 		.data		= &max_user_watches,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &zero,
 	},
-	{ .ctl_name = 0 }
+	{ }
 };
 #endif /* CONFIG_SYSCTL */
 
@@ -317,17 +317,17 @@ static void ep_nested_calls_init(struct nested_calls *ncalls)
  * @nproc: Nested call core function pointer.
  * @priv: Opaque data to be passed to the @nproc callback.
  * @cookie: Cookie to be used to identify this nested call.
+ * @ctx: This instance context.
  *
  * Returns: Returns the code returned by the @nproc callback, or -1 if
  *          the maximum recursion limit has been exceeded.
  */
 static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 			  int (*nproc)(void *, void *, int), void *priv,
-			  void *cookie)
+			  void *cookie, void *ctx)
 {
 	int error, call_nests = 0;
 	unsigned long flags;
-	int this_cpu = get_cpu();
 	struct list_head *lsthead = &ncalls->tasks_call_list;
 	struct nested_call_node *tncur;
 	struct nested_call_node tnode;
@@ -340,7 +340,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 	 * very much limited.
 	 */
 	list_for_each_entry(tncur, lsthead, llink) {
-		if (tncur->cpu == this_cpu &&
+		if (tncur->ctx == ctx &&
 		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
 			/*
 			 * Ops ... loop detected or maximum nest level reached.
@@ -352,7 +352,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 	}
 
 	/* Add the current task and cookie to the list */
-	tnode.cpu = this_cpu;
+	tnode.ctx = ctx;
 	tnode.cookie = cookie;
 	list_add(&tnode.llink, lsthead);
 
@@ -364,16 +364,34 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 	/* Remove the current task from the list */
 	spin_lock_irqsave(&ncalls->lock, flags);
 	list_del(&tnode.llink);
- out_unlock:
+out_unlock:
 	spin_unlock_irqrestore(&ncalls->lock, flags);
 
-	put_cpu();
-
 	return error;
 }
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
+				     unsigned long events, int subclass)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
+	wake_up_locked_poll(wqueue, events);
+	spin_unlock_irqrestore(&wqueue->lock, flags);
+}
+#else
+static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
+				     unsigned long events, int subclass)
+{
+	wake_up_poll(wqueue, events);
+}
+#endif
+
 static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
 {
-	wake_up_nested((wait_queue_head_t *) cookie, 1 + call_nests);
+	ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
+			  1 + call_nests);
 
 	return 0;
 }
@@ -389,8 +407,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
  */
 static void ep_poll_safewake(wait_queue_head_t *wq)
 {
+	int this_cpu = get_cpu();
+
 	ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
-		       ep_poll_wakeup_proc, NULL, wq);
+		       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
+
+	put_cpu();
 }
 
 /*
@@ -644,7 +666,7 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
 	 * could re-enter here.
 	 */
 	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
-				   ep_poll_readyevents_proc, ep, ep);
+				   ep_poll_readyevents_proc, ep, ep, current);
 
 	return pollflags != -1 ? pollflags : 0;
 }
@@ -783,6 +805,15 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
 		goto out_unlock;
 
 	/*
+	 * Check the events coming with the callback. At this stage, not
+	 * every device reports the events in the "key" parameter of the
+	 * callback. We need to be able to handle both cases here, hence the
+	 * test for "key" != NULL before the event match test.
+	 */
+	if (key && !((unsigned long) key & epi->event.events))
+		goto out_unlock;
+
+	/*
 	 * If we are trasfering events to userspace, we can hold no locks
 	 * (because we're accessing user memory, and because of linux f_op->poll()
 	 * semantics). All the events that happens during that period of time are
@@ -1109,8 +1140,7 @@ retry:
 	 * ep_poll_callback() when events will become available.
 	 */
 	init_waitqueue_entry(&wait, current);
-	wait.flags |= WQ_FLAG_EXCLUSIVE;
-	__add_wait_queue(&ep->wq, &wait);
+	__add_wait_queue_exclusive(&ep->wq, &wait);
 
 	for (;;) {
 		/*
@@ -1175,7 +1205,7 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
 	 * a file structure and a free file descriptor.
 	 */
 	error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
-				 flags & O_CLOEXEC);
+				 O_RDWR | (flags & O_CLOEXEC));
 	if (error < 0)
 		ep_free(ep);
 
@@ -1184,7 +1214,7 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
 
 SYSCALL_DEFINE1(epoll_create, int, size)
 {
-	if (size < 0)
+	if (size <= 0)
 		return -EINVAL;
 
 	return sys_epoll_create1(0);
@@ -1254,7 +1284,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	case EPOLL_CTL_ADD:
 		if (!epi) {
 			epds.events |= POLLERR | POLLHUP;
-
 			error = ep_insert(ep, &epds, tfile, fd);
 		} else
 			error = -EEXIST;
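
The heart of the change above is ep_call_nested(): recursion across nested epoll sets is now detected per opaque context (the wakeup path passes a CPU number, the ->poll() path passes current) instead of always per CPU. Below is a stand-alone user-space sketch of that detection scheme, with illustrative names; it is single-threaded, so the spinlock the kernel takes around the list is omitted:

#include <stdio.h>

#define MAX_NESTS 4

struct nested_call_node {
	struct nested_call_node *next;
	void *cookie;		/* identifies the object being visited */
	void *ctx;		/* identifies who is doing the visiting */
};

static struct nested_call_node *call_list;

/* Returns nproc()'s result, or -1 on a loop / nesting overflow. */
static int call_nested(int (*nproc)(void *, void *, int), void *priv,
		       void *cookie, void *ctx)
{
	struct nested_call_node tnode, *cur;
	int call_nests = 0, error;

	/* The same ctx revisiting the same cookie means a loop; too many
	 * in-flight entries for this ctx means the nesting limit is hit. */
	for (cur = call_list; cur; cur = cur->next)
		if (cur->ctx == ctx &&
		    (cur->cookie == cookie || ++call_nests > MAX_NESTS))
			return -1;

	/* Push our frame, run the callback, pop the frame. */
	tnode.cookie = cookie;
	tnode.ctx = ctx;
	tnode.next = call_list;
	call_list = &tnode;
	error = nproc(priv, cookie, call_nests);
	call_list = tnode.next;

	return error;
}

static int reenter(void *priv, void *cookie, int call_nests)
{
	/* Re-enter with the same cookie and ctx: detected as a loop. */
	return call_nested(reenter, priv, cookie, priv);
}

int main(void)
{
	int ctx;

	printf("%d\n", call_nested(reenter, &ctx, &ctx, &ctx));	/* prints -1 */
	return 0;
}

Keying on a caller-supplied context is what lets ep_eventpoll_poll() use current: a task pointer stays meaningful across preemption and scheduling, whereas a CPU number is only stable while preemption is disabled, which is why ep_poll_safewake() now brackets the call with its own get_cpu()/put_cpu().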
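
The ep_wake_up_nested() helper added under CONFIG_DEBUG_LOCK_ALLOC exists because one epoll set may be registered inside another: waking the inner set takes the outer set's wait-queue lock while a lock of the same class is already held, and the 1 + call_nests subclass tells lockdep that this recursion is intentional. That nesting can be built from user space as follows (a minimal sketch, error handling mostly omitted):

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN };
	int inner = epoll_create1(0);
	int outer = epoll_create1(0);
	int pfd[2];

	if (inner < 0 || outer < 0 || pipe(pfd) < 0)
		return 1;

	/* pipe read end -> inner set, inner set -> outer set */
	ev.data.fd = pfd[0];
	epoll_ctl(inner, EPOLL_CTL_ADD, pfd[0], &ev);
	ev.data.fd = inner;
	epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);

	/* Writing to the pipe wakes the inner set, which in turn wakes
	 * the outer set: the wakeup nests one level deep, so the kernel
	 * runs ep_poll_wakeup_proc() with call_nests == 1. */
	if (write(pfd[1], "x", 1) != 1)
		return 1;
	if (epoll_wait(outer, &ev, 1, 1000) == 1)
		printf("outer set readable via fd %d\n", ev.data.fd);

	return 0;
}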
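
Two hunks change behavior visible from user space: epoll_create() now rejects a size of zero as well as negative values, and epoll_create1() opens the eventpoll file O_RDWR instead of with no access mode. A quick check of the first, together with the implicit POLLERR/POLLHUP monitoring shown in the EPOLL_CTL_ADD context lines (again a sketch, with trimmed error handling):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN };
	int pfd[2], epfd;

	/* After the "size <= 0" change, epoll_create(0) fails with EINVAL. */
	if (epoll_create(0) < 0)
		printf("epoll_create(0): %s\n", strerror(errno));

	epfd = epoll_create1(0);
	if (epfd < 0 || pipe(pfd) < 0)
		return 1;

	/* EPOLLERR/EPOLLHUP are added implicitly at EPOLL_CTL_ADD time, so
	 * the hangup is reported even though we only asked for EPOLLIN. */
	ev.data.fd = pfd[0];
	epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &ev);
	close(pfd[1]);			/* hang up the write side */

	if (epoll_wait(epfd, &ev, 1, 1000) == 1)
		printf("events: 0x%x\n", ev.events);	/* includes EPOLLHUP */

	close(pfd[0]);
	close(epfd);
	return 0;
}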