struct nested_call_node {
struct list_head llink;
void *cookie;
- int cpu;
+ void *ctx;
};
/*
* @nproc: Nested call core function pointer.
* @priv: Opaque data to be passed to the @nproc callback.
* @cookie: Cookie to be used to identify this nested call.
+ * @ctx: This instance's context.
*
* Returns: The code returned by the @nproc callback, or -1 if the
* maximum recursion limit has been exceeded.
*/
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
int (*nproc)(void *, void *, int), void *priv,
- void *cookie)
+ void *cookie, void *ctx)
{
int error, call_nests = 0;
unsigned long flags;
- int this_cpu = get_cpu();
struct list_head *lsthead = &ncalls->tasks_call_list;
struct nested_call_node *tncur;
struct nested_call_node tnode;
* very much limited.
*/
list_for_each_entry(tncur, lsthead, llink) {
- if (tncur->cpu == this_cpu &&
+ if (tncur->ctx == ctx &&
(tncur->cookie == cookie || ++call_nests > max_nests)) {
/*
* Oops ... loop detected or maximum nest level reached.
}
/* Add the current task and cookie to the list */
- tnode.cpu = this_cpu;
+ tnode.ctx = ctx;
tnode.cookie = cookie;
list_add(&tnode.llink, lsthead);
/* Remove the current task from the list */
spin_lock_irqsave(&ncalls->lock, flags);
list_del(&tnode.llink);
- out_unlock:
+out_unlock:
spin_unlock_irqrestore(&ncalls->lock, flags);
- put_cpu();
return error;
}
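For reference, a minimal single-threaded userspace sketch of the loop-detection
scheme used by ep_call_nested() (hypothetical names; the real code also takes
ncalls->lock around every list operation):

#include <stdio.h>

#define MAX_NESTS 4

/* One frame of the simulated nested-call chain. */
struct nested_node {
	struct nested_node *next;
	void *cookie;
	void *ctx;
};

static struct nested_node *call_list;	/* head of the active-call list */

/* Refuse to proceed if the same (ctx, cookie) pair is already active
 * (a loop), or if this ctx already has max_nests frames on the list. */
static int call_nested(int max_nests, int (*nproc)(void *, void *, int),
		       void *priv, void *cookie, void *ctx)
{
	struct nested_node *cur;
	struct nested_node node;
	int call_nests = 0, error;

	for (cur = call_list; cur; cur = cur->next)
		if (cur->ctx == ctx &&
		    (cur->cookie == cookie || ++call_nests > max_nests))
			return -1;	/* loop detected or nest limit hit */

	/* Push our frame, run the callback (which may recurse), pop. */
	node.cookie = cookie;
	node.ctx = ctx;
	node.next = call_list;
	call_list = &node;

	error = nproc(priv, cookie, call_nests);

	call_list = node.next;
	return error;
}

/* A callback that re-enters with the same cookie: the second entry is
 * caught as a loop and the whole chain unwinds with -1. */
static int looping_proc(void *priv, void *cookie, int call_nests)
{
	printf("depth %d\n", call_nests);
	return call_nested(MAX_NESTS, looping_proc, priv, cookie, priv);
}

int main(void)
{
	void *ctx = (void *) 1;		/* stand-in for a task pointer */

	/* priv doubles as the ctx to recurse with, purely for brevity */
	printf("result: %d\n",
	       call_nested(MAX_NESTS, looping_proc, ctx, (void *) 0xA, ctx));
	return 0;
}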
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
+ unsigned long events, int subclass)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
+ wake_up_locked_poll(wqueue, events);
+ spin_unlock_irqrestore(&wqueue->lock, flags);
+}
+#else
+static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
+ unsigned long events, int subclass)
+{
+ wake_up_poll(wqueue, events);
+}
+#endif
+
static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
- wake_up_nested((wait_queue_head_t *) cookie, 1 + call_nests);
+ ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
+ 1 + call_nests);
return 0;
}
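The subclass passed down from ep_poll_wakeup_proc() matters because wait-queue
wakeups really do nest when epoll fds watch other epoll fds: each level
acquires another wait_queue_head_t lock of the same lockdep class, and
spin_lock_irqsave_nested() keeps lockdep from reporting that as a
self-deadlock. A userspace scenario that produces exactly this nesting
(assumed setup, error checking omitted):

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
	int pipefd[2], epfd_inner, epfd_outer, n;
	struct epoll_event ev = { .events = EPOLLIN }, out;

	pipe(pipefd);
	epfd_inner = epoll_create1(0);
	epfd_outer = epoll_create1(0);

	/* inner epoll watches the pipe's read end */
	ev.data.fd = pipefd[0];
	epoll_ctl(epfd_inner, EPOLL_CTL_ADD, pipefd[0], &ev);

	/* outer epoll watches the inner epoll fd itself */
	ev.data.fd = epfd_inner;
	epoll_ctl(epfd_outer, EPOLL_CTL_ADD, epfd_inner, &ev);

	/* waking the pipe's queue wakes the inner epoll's queue, whose
	 * callback in turn wakes the outer epoll's queue: two wait-queue
	 * locks of the same class held nested */
	write(pipefd[1], "x", 1);

	n = epoll_wait(epfd_outer, &out, 1, 0);
	printf("outer ready: %d\n", n);		/* expect 1 */
	return 0;
}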
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
+ int this_cpu = get_cpu();
+
ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
- ep_poll_wakeup_proc, NULL, wq);
+ ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
+
+ put_cpu();
}
/*
* could re-enter here.
*/
pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
- ep_poll_readyevents_proc, ep, ep);
+ ep_poll_readyevents_proc, ep, ep, current);
return pollflags != -1 ? pollflags : 0;
}
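Note the asymmetry between the two call sites: ep_poll_safewake() runs between
get_cpu() and put_cpu(), so preemption is disabled and the CPU number is a
stable identity for the duration of the nested call, whereas the readyevents
walk above runs in plain process context, where the task may sleep and migrate
between CPUs, making the task pointer (current) the only safe identity. The
two paths never compare contexts against each other, since they use separate
nested_calls lists (poll_safewake_ncalls and poll_readywalk_ncalls).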
goto out_unlock;
/*
+ * Check the events coming with the callback. At this stage, not
+ * every device reports the events in the "key" parameter of the
+ * callback. We need to be able to handle both cases here, hence the
+ * test for "key" != NULL before the event match test.
+ */
+ if (key && !((unsigned long) key & epi->event.events))
+ goto out_unlock;
+
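+
The added filter is easy to get wrong in either direction, so here is a tiny
sketch of the intended truth table (hypothetical helper; zero stands in for
the NULL key some drivers still pass):

#include <stdio.h>
#include <poll.h>

/* Deliver the wakeup only if the key carries at least one event the
 * watcher registered for; a zero key means "no info, assume a match". */
static int event_matches(unsigned long key, unsigned long interest)
{
	return !key || (key & interest);
}

int main(void)
{
	unsigned long interest = POLLIN;

	printf("%d\n", event_matches(POLLOUT, interest));	/* 0: filtered */
	printf("%d\n", event_matches(POLLIN, interest));	/* 1: delivered */
	printf("%d\n", event_matches(0, interest));		/* 1: no key info */
	return 0;
}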
+ /*
* If we are transferring events to userspace, we can hold no locks
* (because we're accessing user memory, and because of linux f_op->poll()
* semantics). All the events that happen during that period of time are
SYSCALL_DEFINE1(epoll_create, int, size)
{
- if (size < 0)
+ if (size <= 0)
return -EINVAL;
return sys_epoll_create1(0);
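From userspace, the visible effect of the tightened check (the size argument
has been a dead hint since 2.6.8, but zero is now rejected rather than
silently accepted):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/epoll.h>

int main(void)
{
	int ok = epoll_create(1);	/* any positive size is fine */
	int bad = epoll_create(0);	/* now fails with EINVAL */

	printf("epoll_create(1) -> %d\n", ok);
	printf("epoll_create(0) -> %d (%s)\n", bad,
	       bad < 0 ? strerror(errno) : "unexpectedly ok");
	return 0;
}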
case EPOLL_CTL_ADD:
if (!epi) {
epds.events |= POLLERR | POLLHUP;
-
error = ep_insert(ep, &epds, tfile, fd);
} else
error = -EEXIST;
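And the same branch as seen from userspace (error checking trimmed): POLLERR
and POLLHUP are forced into the interest set on insert, so callers get hangup
notifications even without asking, while a second ADD of the same fd lands in
the "else" branch above:

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
	int pipefd[2], epfd = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	pipe(pipefd);
	ev.data.fd = pipefd[0];

	/* first ADD succeeds; POLLERR/POLLHUP need not be requested */
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) == 0)
		puts("first ADD: ok");

	/* same fd again: the epi lookup finds an existing entry */
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0)
		printf("second ADD: %s\n", strerror(errno));	/* EEXIST */
	return 0;
}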