diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index ba8ccd4..0c642d5 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
 #include <linux/sysctl.h>
 
 /*
- * Have a reasonable limit on the number of tasks checked:
+ * The number of tasks checked:
  */
-unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
+unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
+
+/*
+ * Limit number of tasks checked in a batch.
+ *
+ * This value controls the preemptibility of khungtaskd since preemption
+ * is disabled during the critical section. It also controls the size of
+ * the RCU grace period. So it needs to be upper-bounded.
+ */
+#define HUNG_TASK_BATCHING 1024
 
 /*
  * Zero means infinite timeout - no checking done:
  */
 unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;
-static unsigned long __read_mostly hung_task_poll_jiffies;
 
 unsigned long __read_mostly sysctl_hung_task_warnings = 10;
 
@@ -60,33 +68,23 @@ static struct notifier_block panic_block = {
        .notifier_call = hung_task_panic,
 };
 
-/*
- * Returns seconds, approximately.  We don't need nanosecond
- * resolution, and we don't need to waste time with a big divide when
- * 2^30ns == 1.074s.
- */
-static unsigned long get_timestamp(void)
-{
-       int this_cpu = raw_smp_processor_id();
-
-       return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
-}
-
-static void check_hung_task(struct task_struct *t, unsigned long now,
-                           unsigned long timeout)
+static void check_hung_task(struct task_struct *t, unsigned long timeout)
 {
        unsigned long switch_count = t->nvcsw + t->nivcsw;
 
-       if (t->flags & PF_FROZEN)
+       /*
+        * Ensure the task is not frozen.
+        * Also, a freshly created task that was scheduled once and then
+        * moved to TASK_UNINTERRUPTIBLE without ever being switched out
+        * must not be checked: its switch count is still zero.
+        */
+       if (unlikely(t->flags & PF_FROZEN || !switch_count))
                return;
 
-       if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
+       if (switch_count != t->last_switch_count) {
                t->last_switch_count = switch_count;
-               t->last_switch_timestamp = now;
                return;
        }
-       if ((long)(now - t->last_switch_timestamp) < timeout)
-               return;
        if (!sysctl_hung_task_warnings)
                return;
        sysctl_hung_task_warnings--;
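
The heuristic that replaces the timestamp check is worth spelling out: a task is flagged only when its combined voluntary and involuntary context-switch count (nvcsw + nivcsw) has not moved between two successive scans while it sat in TASK_UNINTERRUPTIBLE. Below is a minimal userspace sketch of just that detection logic; struct task and its fields are hypothetical stand-ins for the kernel's task_struct.

#include <stdbool.h>
#include <stdio.h>

struct task {
	const char *comm;             /* task name */
	unsigned long nvcsw;          /* voluntary context switches */
	unsigned long nivcsw;         /* involuntary context switches */
	unsigned long last_switch_count;
	bool uninterruptible;         /* TASK_UNINTERRUPTIBLE analogue */
};

/* One scan pass: returns true if the task looks hung. */
static bool check_hung(struct task *t)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	/* A never-switched-out task has a zero count: skip it. */
	if (!t->uninterruptible || !switch_count)
		return false;

	if (switch_count != t->last_switch_count) {
		t->last_switch_count = switch_count;  /* it ran; rearm */
		return false;
	}
	return true;  /* no context switch since the previous scan */
}

int main(void)
{
	struct task t = { .comm = "demo", .nvcsw = 3, .nivcsw = 1,
			  .uninterruptible = true };

	check_hung(&t);      /* first scan records the count */
	if (check_hung(&t))  /* second scan: count unchanged */
		printf("%s: blocked for a full scan period\n", t.comm);
	return 0;
}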
@@ -102,7 +100,6 @@ static void check_hung_task(struct task_struct *t, unsigned long now,
        sched_show_task(t);
        __debug_show_held_locks(t);
 
-       t->last_switch_timestamp = now;
        touch_nmi_watchdog();
 
        if (sysctl_hung_task_panic)
@@ -110,6 +107,24 @@ static void check_hung_task(struct task_struct *t, unsigned long now,
 }
 
 /*
+ * To avoid extending the RCU grace period for an unbounded amount of time,
+ * periodically exit the critical section and enter a new one.
+ *
+ * For preemptible RCU it is sufficient to call rcu_read_unlock in order
+ * to exit the grace period. For classic RCU, a reschedule is required.
+ */
+static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+{
+       get_task_struct(g);
+       get_task_struct(t);
+       rcu_read_unlock();
+       cond_resched();
+       rcu_read_lock();
+       put_task_struct(t);
+       put_task_struct(g);
+}
+
+/*
  * Check whether a TASK_UNINTERRUPTIBLE task does not get woken up for
  * a really long time (120 seconds). If that happens, print out
  * a warning.
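
rcu_lock_break() pins g and t with get_task_struct() so both survive the window where the RCU read lock is dropped, then reacquires the lock and drops the references. The sketch below is a loose userspace analogue of that pattern, assuming a pthread read-write lock in place of RCU and an atomic refcount in place of the task reference; every name in it is hypothetical.

#include <pthread.h>
#include <stdatomic.h>

struct node {
	atomic_int refcount;   /* pins the node while the lock is dropped */
	struct node *next;
};

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Called with list_lock read-held; returns with it read-held again. */
static void lock_break(struct node *n)
{
	atomic_fetch_add(&n->refcount, 1);   /* get_task_struct() analogue */
	pthread_rwlock_unlock(&list_lock);
	/* Writers run here; in the kernel, this is where the grace
	 * period can end and khungtaskd can be preempted. */
	pthread_rwlock_rdlock(&list_lock);
	atomic_fetch_sub(&n->refcount, 1);   /* put_task_struct() analogue */
}

As in the kernel code, reacquiring the lock is not enough on its own: the caller must still verify that the pinned elements are valid resume points, which is what the TASK_DEAD test in the next hunk does.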
@@ -117,7 +132,7 @@ static void check_hung_task(struct task_struct *t, unsigned long now,
 static void check_hung_uninterruptible_tasks(unsigned long timeout)
 {
        int max_count = sysctl_hung_task_check_count;
-       unsigned long now = get_timestamp();
+       int batch_count = HUNG_TASK_BATCHING;
        struct task_struct *g, *t;
 
        /*
@@ -127,43 +142,45 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
        if (test_taint(TAINT_DIE) || did_panic)
                return;
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        do_each_thread(g, t) {
-               if (!--max_count)
+               if (!max_count--)
                        goto unlock;
+               if (!--batch_count) {
+                       batch_count = HUNG_TASK_BATCHING;
+                       rcu_lock_break(g, t);
+                       /* Exit if t or g was unhashed during refresh. */
+                       if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+                               goto unlock;
+               }
                /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
                if (t->state == TASK_UNINTERRUPTIBLE)
-                       check_hung_task(t, now, timeout);
+                       check_hung_task(t, timeout);
        } while_each_thread(g, t);
  unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 }
 
-static void update_poll_jiffies(void)
+static unsigned long timeout_jiffies(unsigned long timeout)
 {
        /* timeout of 0 will disable the watchdog */
-       if (sysctl_hung_task_timeout_secs == 0)
-               hung_task_poll_jiffies = MAX_SCHEDULE_TIMEOUT;
-       else
-               hung_task_poll_jiffies = sysctl_hung_task_timeout_secs * HZ / 2;
+       return timeout ? timeout * HZ : MAX_SCHEDULE_TIMEOUT;
 }
 
 /*
  * Process updating of timeout sysctl
  */
 int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
-                                 struct file *filp, void __user *buffer,
+                                 void __user *buffer,
                                  size_t *lenp, loff_t *ppos)
 {
        int ret;
 
-       ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+       ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 
        if (ret || !write)
                goto out;
 
-       update_poll_jiffies();
-
        wake_up_process(watchdog_task);
 
  out:
@@ -176,20 +193,14 @@ int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
 static int watchdog(void *dummy)
 {
        set_user_nice(current, 0);
-       update_poll_jiffies();
 
        for ( ; ; ) {
-               unsigned long timeout;
+               unsigned long timeout = sysctl_hung_task_timeout_secs;
 
-               while (schedule_timeout_interruptible(hung_task_poll_jiffies));
+               while (schedule_timeout_interruptible(timeout_jiffies(timeout)))
+                       timeout = sysctl_hung_task_timeout_secs;
 
-               /*
-                * Need to cache timeout here to avoid timeout being set
-                * to 0 via sysctl while inside check_hung_*_tasks().
-                */
-               timeout = sysctl_hung_task_timeout_secs;
-               if (timeout)
-                       check_hung_uninterruptible_tasks(timeout);
+               check_hung_uninterruptible_tasks(timeout);
        }
 
        return 0;
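
The reworked loop drops the cached hung_task_poll_jiffies entirely: each pass re-reads sysctl_hung_task_timeout_secs, and a sysctl write wakes the thread through wake_up_process() so a new period takes effect immediately rather than after the old one expires. A userspace sketch of that shape, assuming a condition variable as the stand-in for the wakeup (all names hypothetical):

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static unsigned long timeout_secs = 120;  /* 0 disables checking */

/* Sleep up to secs; returns 1 if signalled early, 0 on timeout. */
static int timed_sleep(unsigned long secs)
{
	struct timespec until;

	clock_gettime(CLOCK_REALTIME, &until);
	until.tv_sec += secs ? secs : 3600;  /* stand-in for "never" */
	return pthread_cond_timedwait(&wake, &lock, &until) != ETIMEDOUT;
}

static void *watchdog_sketch(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	for (;;) {
		unsigned long timeout = timeout_secs;

		/* Woken early means the timeout was updated: re-read it. */
		while (timed_sleep(timeout))
			timeout = timeout_secs;

		/* check_hung_uninterruptible_tasks(timeout) goes here */
	}
	return NULL;
}

A writer updating timeout_secs would take lock, store the new value, and call pthread_cond_signal(&wake), mirroring what proc_dohung_task_timeout_secs() does with wake_up_process(watchdog_task).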