diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index b763bc2..7d3f4fa 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,7 +16,7 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/wait.h>
-#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
 #include "slow-work.h"
 
 static void slow_work_cull_timeout(unsigned long);
@@ -49,7 +49,6 @@ static const int slow_work_max_vslow = 99;
 
 ctl_table slow_work_sysctls[] = {
        {
-               .ctl_name       = CTL_UNNUMBERED,
                .procname       = "min-threads",
                .data           = &slow_work_min_threads,
                .maxlen         = sizeof(unsigned),
@@ -59,7 +58,6 @@ ctl_table slow_work_sysctls[] = {
                .extra2         = &slow_work_max_threads,
        },
        {
-               .ctl_name       = CTL_UNNUMBERED,
                .procname       = "max-threads",
                .data           = &slow_work_max_threads,
                .maxlen         = sizeof(unsigned),
@@ -69,16 +67,15 @@ ctl_table slow_work_sysctls[] = {
                .extra2         = (void *) &slow_work_max_max_threads,
        },
        {
-               .ctl_name       = CTL_UNNUMBERED,
                .procname       = "vslow-percentage",
                .data           = &vslow_work_proportion,
                .maxlen         = sizeof(unsigned),
                .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = (void *) &slow_work_min_vslow,
                .extra2         = (void *) &slow_work_max_vslow,
        },
-       { .ctl_name = 0 }
+       {}
 };
 #endif
 
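The hunks above drop the deprecated .ctl_name/CTL_UNNUMBERED fields, take the
address-of operator off the proc_handler, and use an empty entry as the table
terminator. For reference, a minimal sketch of an entry in the resulting style
(the "example" names below are illustrative and not part of this patch):

	#include <linux/sysctl.h>

	static int example_value = 1;
	static int example_min = 0, example_max = 10;

	static ctl_table example_sysctls[] = {
		{
			.procname	= "example-value",
			.data		= &example_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_minmax,
			.extra1		= &example_min,
			.extra2		= &example_max,
		},
		{}		/* empty entry terminates the table */
	};
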
@@ -109,12 +106,36 @@ static struct module *slow_work_unreg_module;
 static struct slow_work *slow_work_unreg_work_item;
 static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
 static DEFINE_MUTEX(slow_work_unreg_sync_lock);
+
+static void slow_work_set_thread_processing(int id, struct slow_work *work)
+{
+       if (work)
+               slow_work_thread_processing[id] = work->owner;
+}
+static void slow_work_done_thread_processing(int id, struct slow_work *work)
+{
+       struct module *module = slow_work_thread_processing[id];
+
+       slow_work_thread_processing[id] = NULL;
+       smp_mb();
+       if (slow_work_unreg_work_item == work ||
+           slow_work_unreg_module == module)
+               wake_up_all(&slow_work_unreg_wq);
+}
+static void slow_work_clear_thread_processing(int id)
+{
+       slow_work_thread_processing[id] = NULL;
+}
+#else
+static void slow_work_set_thread_processing(int id, struct slow_work *work) {}
+static void slow_work_done_thread_processing(int id, struct slow_work *work) {}
+static void slow_work_clear_thread_processing(int id) {}
 #endif
 
 /*
  * Data for tracking currently executing items for indication through /proc
  */
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
 pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
 DEFINE_RWLOCK(slow_work_execs_lock);
@@ -133,6 +154,15 @@ LIST_HEAD(vslow_work_queue);
 DEFINE_SPINLOCK(slow_work_queue_lock);
 
 /*
+ * The following are two wait queues that get pinged when a work item is placed
+ * on an empty queue.  These allow work items that are hogging a thread by
+ * sleeping in a way that could be deferred to yield their thread and enqueue
+ * themselves.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
+static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
+
+/*
  * The thread controls.  A variable used to signal to the threads that they
  * should exit when the queue is empty, a waitqueue used by the threads to wait
  * for signals, and a completion set by the last thread to exit.
@@ -188,9 +218,6 @@ static unsigned slow_work_calc_vsmax(void)
  */
 static noinline bool slow_work_execute(int id)
 {
-#ifdef CONFIG_MODULES
-       struct module *module;
-#endif
        struct slow_work *work = NULL;
        unsigned vsmax;
        bool very_slow;
@@ -227,10 +254,7 @@ static noinline bool slow_work_execute(int id)
                very_slow = false; /* avoid the compiler warning */
        }
 
-#ifdef CONFIG_MODULES
-       if (work)
-               slow_work_thread_processing[id] = work->owner;
-#endif
+       slow_work_set_thread_processing(id, work);
        if (work) {
                slow_work_mark_time(work);
                slow_work_begin_exec(id, work);
@@ -278,15 +302,7 @@ static noinline bool slow_work_execute(int id)
 
        /* sort out the race between module unloading and put_ref() */
        slow_work_put_ref(work);
-
-#ifdef CONFIG_MODULES
-       module = slow_work_thread_processing[id];
-       slow_work_thread_processing[id] = NULL;
-       smp_mb();
-       if (slow_work_unreg_work_item == work ||
-           slow_work_unreg_module == module)
-               wake_up_all(&slow_work_unreg_wq);
-#endif
+       slow_work_done_thread_processing(id, work);
 
        return true;
 
@@ -301,11 +317,55 @@ auto_requeue:
        else
                list_add_tail(&work->link, &slow_work_queue);
        spin_unlock_irq(&slow_work_queue_lock);
-       slow_work_thread_processing[id] = NULL;
+       slow_work_clear_thread_processing(id);
        return true;
 }
 
 /**
+ * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
+ * @work: The work item under execution that wants to sleep
+ * @_timeout: Scheduler sleep timeout
+ *
+ * Allow a requeueable work item to sleep on a slow-work processor thread until
+ * that thread is needed to do some other work or the sleep is interrupted by
+ * some other event.
+ *
+ * Before calling this function, the caller must set up a wake-up event, set
+ * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and test its own
+ * condition, as no test is made here.
+ *
+ * False is returned if there is nothing on the queue; true is returned if the
+ * work item should be requeued.
+ */
+bool slow_work_sleep_till_thread_needed(struct slow_work *work,
+                                       signed long *_timeout)
+{
+       wait_queue_head_t *wfo_wq;
+       struct list_head *queue;
+
+       DEFINE_WAIT(wait);
+
+       if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+               wfo_wq = &vslow_work_queue_waits_for_occupation;
+               queue = &vslow_work_queue;
+       } else {
+               wfo_wq = &slow_work_queue_waits_for_occupation;
+               queue = &slow_work_queue;
+       }
+
+       if (!list_empty(queue))
+               return true;
+
+       add_wait_queue_exclusive(wfo_wq, &wait);
+       if (list_empty(queue))
+               *_timeout = schedule_timeout(*_timeout);
+       finish_wait(wfo_wq, &wait);
+
+       return !list_empty(queue);
+}
+EXPORT_SYMBOL(slow_work_sleep_till_thread_needed);
+
+/**
  * slow_work_enqueue - Schedule a slow work item for processing
  * @work: The work item to queue
  *
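The new export above lets a requeueable item give up its thread when other
work is waiting for one. A hypothetical caller might look like the following
sketch; my_event_wq, my_event_happened() and the requeue policy are
illustrative only, not taken from this patch, and <linux/wait.h> and
<linux/slow-work.h> are assumed to be included:

	/* An execute routine that waits for its own event but yields its
	 * thread if other slow work is queued. */
	static void my_work_execute(struct slow_work *work)
	{
		signed long timeout = 5 * HZ;
		DEFINE_WAIT(wait);

		for (;;) {
			/* set up our own wake-up and sleep state, then test
			 * our condition - the helper makes no test of its own */
			prepare_to_wait(&my_event_wq, &wait,
					TASK_UNINTERRUPTIBLE);
			if (my_event_happened() || timeout <= 0)
				break;

			if (slow_work_sleep_till_thread_needed(work,
							       &timeout)) {
				/* another item needs a thread: requeue
				 * ourselves and yield this one */
				finish_wait(&my_event_wq, &wait);
				slow_work_enqueue(work);
				return;
			}
		}
		finish_wait(&my_event_wq, &wait);

		/* ... proceed with the actual work ... */
	}
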
@@ -335,6 +395,8 @@ auto_requeue:
  */
 int slow_work_enqueue(struct slow_work *work)
 {
+       wait_queue_head_t *wfo_wq;
+       struct list_head *queue;
        unsigned long flags;
        int ret;
 
@@ -354,6 +416,14 @@ int slow_work_enqueue(struct slow_work *work)
         * maintaining our promise
         */
        if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
+               if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+                       wfo_wq = &vslow_work_queue_waits_for_occupation;
+                       queue = &vslow_work_queue;
+               } else {
+                       wfo_wq = &slow_work_queue_waits_for_occupation;
+                       queue = &slow_work_queue;
+               }
+
                spin_lock_irqsave(&slow_work_queue_lock, flags);
 
                if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
@@ -380,11 +450,13 @@ int slow_work_enqueue(struct slow_work *work)
                        if (ret < 0)
                                goto failed;
                        slow_work_mark_time(work);
-                       if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
-                               list_add_tail(&work->link, &vslow_work_queue);
-                       else
-                               list_add_tail(&work->link, &slow_work_queue);
+                       list_add_tail(&work->link, queue);
                        wake_up(&slow_work_thread_wq);
+
+                       /* if someone who could be requeued is sleeping on a
+                        * thread, then ask them to yield their thread */
+                       if (work->link.prev == queue)
+                               wake_up(wfo_wq);
                }
 
                spin_unlock_irqrestore(&slow_work_queue_lock, flags);
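The wake-up above is only issued when the item lands on a previously empty
queue. A tiny helper, shown purely for illustration and not part of the patch,
spells out the list invariant being relied on:

	/* After list_add_tail(&work->link, queue), the new entry's ->prev
	 * points back at the list head iff the queue was empty before the
	 * add, i.e. this work item is now at the head of the queue. */
	static bool added_to_empty_queue(struct slow_work *work,
					 struct list_head *queue)
	{
		return work->link.prev == queue;
	}
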
@@ -487,9 +559,19 @@ EXPORT_SYMBOL(slow_work_cancel);
  */
 static void delayed_slow_work_timer(unsigned long data)
 {
+       wait_queue_head_t *wfo_wq;
+       struct list_head *queue;
        struct slow_work *work = (struct slow_work *) data;
        unsigned long flags;
-       bool queued = false, put = false;
+       bool queued = false, put = false, first = false;
+
+       if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+               wfo_wq = &vslow_work_queue_waits_for_occupation;
+               queue = &vslow_work_queue;
+       } else {
+               wfo_wq = &slow_work_queue_waits_for_occupation;
+               queue = &slow_work_queue;
+       }
 
        spin_lock_irqsave(&slow_work_queue_lock, flags);
        if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
@@ -502,17 +584,18 @@ static void delayed_slow_work_timer(unsigned long data)
                        put = true;
                } else {
                        slow_work_mark_time(work);
-                       if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
-                               list_add_tail(&work->link, &vslow_work_queue);
-                       else
-                               list_add_tail(&work->link, &slow_work_queue);
+                       list_add_tail(&work->link, queue);
                        queued = true;
+                       if (work->link.prev == queue)
+                               first = true;
                }
        }
 
        spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        if (put)
                slow_work_put_ref(work);
+       if (first)
+               wake_up(wfo_wq);
        if (queued)
                wake_up(&slow_work_thread_wq);
 }
@@ -554,7 +637,7 @@ int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
                        goto cancelled;
 
                /* the timer holds a reference whilst it is pending */
-               ret = work->ops->get_ref(work);
+               ret = slow_work_get_ref(work);
                if (ret < 0)
                        goto cant_get_ref;
 
@@ -737,7 +820,7 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 static const struct slow_work_ops slow_work_new_thread_ops = {
        .owner          = THIS_MODULE,
        .execute        = slow_work_new_thread_execute,
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
        .desc           = slow_work_new_thread_desc,
 #endif
 };
@@ -867,6 +950,7 @@ EXPORT_SYMBOL(slow_work_register_user);
  */
 static void slow_work_wait_for_items(struct module *module)
 {
+#ifdef CONFIG_MODULES
        DECLARE_WAITQUEUE(myself, current);
        struct slow_work *work;
        int loop;
@@ -913,6 +997,7 @@ static void slow_work_wait_for_items(struct module *module)
 
        remove_wait_queue(&slow_work_unreg_wq, &myself);
        mutex_unlock(&slow_work_unreg_sync_lock);
+#endif /* CONFIG_MODULES */
 }
 
 /**
@@ -967,9 +1052,15 @@ static int __init init_slow_work(void)
        if (slow_work_max_max_threads < nr_cpus * 2)
                slow_work_max_max_threads = nr_cpus * 2;
 #endif
-#ifdef CONFIG_SLOW_WORK_PROC
-       proc_create("slow_work_rq", S_IFREG | 0400, NULL,
-                   &slow_work_runqueue_fops);
+#ifdef CONFIG_SLOW_WORK_DEBUG
+       {
+               struct dentry *dbdir;
+
+               dbdir = debugfs_create_dir("slow_work", NULL);
+               if (dbdir && !IS_ERR(dbdir))
+                       debugfs_create_file("runqueue", S_IFREG | 0400, dbdir,
+                                           NULL, &slow_work_runqueue_fops);
+       }
 #endif
        return 0;
 }
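
The final hunk moves the old "slow_work_rq" proc file to debugfs as
slow_work/runqueue. The slow-work core is built in and never unloaded, so the
patch keeps the dentries for good; for completeness, a sketch of the usual
create/teardown pairing (my_dbdir and the function names are illustrative,
not part of this patch):

	static struct dentry *my_dbdir;

	static int __init my_debugfs_init(void)
	{
		my_dbdir = debugfs_create_dir("slow_work", NULL);
		if (my_dbdir && !IS_ERR(my_dbdir))
			debugfs_create_file("runqueue", S_IFREG | 0400,
					    my_dbdir, NULL,
					    &slow_work_runqueue_fops);
		return 0;
	}

	static void my_debugfs_exit(void)
	{
		/* removes the directory and anything created beneath it */
		debugfs_remove_recursive(my_dbdir);
	}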