mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 8ce0900..235ac44 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2002, Linus Torvalds.
  *
- * 09Apr2002   akpm@zip.com.au
+ * 09Apr2002   Andrew Morton
  *             Initial version
  * 29Feb2004   kaos@sgi.com
  *             Move worker thread creation to kthread to avoid chewing
@@ -17,8 +17,8 @@
 #include <linux/gfp.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/fs.h>          // Needed by writeback.h
-#include <linux/writeback.h>   // Prototypes pdflush_operation()
+#include <linux/fs.h>          /* Needed by writeback.h          */
+#include <linux/writeback.h>   /* Prototypes pdflush_operation() */
 #include <linux/kthread.h>
 #include <linux/cpuset.h>
 #include <linux/freezer.h>
@@ -92,12 +92,12 @@ struct pdflush_work {
 static int __pdflush(struct pdflush_work *my_work)
 {
        current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+       set_freezable();
        my_work->fn = NULL;
        my_work->who = current;
        INIT_LIST_HEAD(&my_work->list);
 
        spin_lock_irq(&pdflush_lock);
-       nr_pdflush_threads++;
        for ( ; ; ) {
                struct pdflush_work *pdf;
 
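
The hunk above does two things. set_freezable() opts the thread in to the
freezer: kernel threads are non-freezable by default, so a flusher that should
park during suspend/hibernate must request it explicitly and then poll
try_to_freeze() at a safe point in its loop. The nr_pdflush_threads++ also
disappears here; the count is now charged by whoever spawns the thread (see
pdflush_init() and start_one_pdflush_thread() further down), closing the
window where a thread exists but is not yet counted. A minimal sketch of the
freezable-kthread pattern, with a hypothetical worker (my_worker is not from
this patch, and real code would re-check its work queue before sleeping):

    #include <linux/kthread.h>
    #include <linux/freezer.h>
    #include <linux/sched.h>

    static int my_worker(void *unused)
    {
            set_freezable();                /* opt in to the freezer */

            while (!kthread_should_stop()) {
                    try_to_freeze();        /* park here during suspend */
                    set_current_state(TASK_INTERRUPTIBLE);
                    schedule();             /* sleep until woken with work */
                    /* ... handle queued work ... */
            }
            return 0;
    }
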
@@ -125,20 +125,26 @@ static int __pdflush(struct pdflush_work *my_work)
 
                (*my_work->fn)(my_work->arg0);
 
+               spin_lock_irq(&pdflush_lock);
+
                /*
                 * Thread creation: For how long have there been zero
                 * available threads?
+                *
+                * To throttle creation, we reset last_empty_jifs.
                 */
-               if (jiffies - last_empty_jifs > 1 * HZ) {
-                       /* unlocked list_empty() test is OK here */
+               if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
                        if (list_empty(&pdflush_list)) {
-                               /* unlocked test is OK here */
-                               if (nr_pdflush_threads < MAX_PDFLUSH_THREADS)
+                               if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
+                                       last_empty_jifs = jiffies;
+                                       nr_pdflush_threads++;
+                                       spin_unlock_irq(&pdflush_lock);
                                        start_one_pdflush_thread();
+                                       spin_lock_irq(&pdflush_lock);
+                               }
                        }
                }
 
-               spin_lock_irq(&pdflush_lock);
                my_work->fn = NULL;
 
                /*
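
Two fixes ride together in the hunk above. First, the thread-creation
heuristic now runs with pdflush_lock held, so the list_empty() and
nr_pdflush_threads tests are no longer the racy "unlocked ... OK here"
guesses the old comments admitted to; the lock is dropped only around
start_one_pdflush_thread(), because kthread_run() can sleep, and sleeping
while holding a spinlock with interrupts disabled is forbidden. Second, the
open-coded jiffies arithmetic becomes time_after() from <linux/jiffies.h>,
which compares through a signed difference and therefore stays correct when
the jiffies counter wraps. Simplified (the real macro also typechecks both
arguments as unsigned long):

    #define time_after(a, b)        ((long)((b) - (a)) < 0)

    /* usage, as in the hunk above: */
    if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
            /* at least one second has passed since last_empty_jifs */
    }
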
@@ -150,7 +156,7 @@ static int __pdflush(struct pdflush_work *my_work)
                if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
                        continue;
                pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
-               if (jiffies - pdf->when_i_went_to_sleep > 1 * HZ) {
+               if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
                        /* Limit exit rate */
                        pdf->when_i_went_to_sleep = jiffies;
                        break;                                  /* exeunt */
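
Same time_after() conversion for the exit heuristic. Worth noting how pdf is
found: pdflush_list.prev is the tail of the list, i.e. the worker that has
been asleep longest, and list_entry() recovers the containing pdflush_work
from its embedded list_head. list_entry() is container_of() in disguise;
stripped of its type checking it is roughly:

    #include <linux/stddef.h>       /* offsetof() */

    #define list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* tail of the list == oldest sleeper: */
    pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
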
@@ -171,7 +177,16 @@ static int __pdflush(struct pdflush_work *my_work)
 static int pdflush(void *dummy)
 {
        struct pdflush_work my_work;
-       cpumask_t cpus_allowed;
+       cpumask_var_t cpus_allowed;
+
+       /*
+        * Since the caller doesn't even check kthread_run() worked, let's not
+        * freak out too much if this fails.
+        */
+       if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+               printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
+               return 0;
+       }
 
        /*
         * pdflush can spend a lot of time doing encryption via dm-crypt.  We
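
The cpumask_t-to-cpumask_var_t conversion above keeps large CPU masks off the
kernel stack: with CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a pointer and
alloc_cpumask_var() allocates the mask from the heap; otherwise it is an
ordinary array and the allocation trivially succeeds. The usual lifecycle,
sketched (the -ENOMEM return is illustrative; this patch returns 0 instead,
since pdflush's caller never checks):

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    cpumask_var_t mask;

    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;         /* can only fail when offstack */

    cpumask_copy(mask, cpu_online_mask);    /* ... populate and use ... */
    free_cpumask_var(mask);
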
@@ -181,13 +196,14 @@ static int pdflush(void *dummy)
 
        /*
         * Some configs put our parent kthread in a limited cpuset,
-        * which kthread() overrides, forcing cpus_allowed == CPU_MASK_ALL.
+        * which kthread() overrides, forcing cpus_allowed == cpu_all_mask.
         * Our needs are more modest - cut back to our cpusets cpus_allowed.
         * This is needed as pdflush's are dynamically created and destroyed.
         * The boottime pdflush's are easily placed w/o these 2 lines.
         */
-       cpus_allowed = cpuset_cpus_allowed(current);
-       set_cpus_allowed(current, cpus_allowed);
+       cpuset_cpus_allowed(current, cpus_allowed);
+       set_cpus_allowed_ptr(current, cpus_allowed);
+       free_cpumask_var(cpus_allowed);
 
        return __pdflush(&my_work);
 }
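
Both call sites change shape for the same reason as the allocation above:
cpuset_cpus_allowed() now fills a caller-supplied mask instead of returning a
whole cpumask_t by value, and set_cpus_allowed_ptr() takes the mask by
pointer. Side by side, as this diff uses them:

    /* before: entire cpumask_t copied through the stack, twice */
    cpumask_t cpus_allowed = cpuset_cpus_allowed(current);
    set_cpus_allowed(current, cpus_allowed);

    /* after: one heap-allocated mask, passed by pointer */
    cpuset_cpus_allowed(current, cpus_allowed);
    set_cpus_allowed_ptr(current, cpus_allowed);
    free_cpumask_var(cpus_allowed);
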
@@ -206,7 +222,6 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
 
        spin_lock_irqsave(&pdflush_lock, flags);
        if (list_empty(&pdflush_list)) {
-               spin_unlock_irqrestore(&pdflush_lock, flags);
                ret = -1;
        } else {
                struct pdflush_work *pdf;
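
This hunk and the next consolidate pdflush_operation()'s two
spin_unlock_irqrestore() calls into a single one after the if/else, so every
path through the function releases pdflush_lock exactly once. The shape of
the idiom, with a hypothetical helper standing in for the real else-branch
body:

    spin_lock_irqsave(&pdflush_lock, flags);
    if (list_empty(&pdflush_list)) {
            ret = -1;               /* no idle worker available */
    } else {
            dispatch_to_idle_worker(fn, arg0);      /* hypothetical helper */
    }
    spin_unlock_irqrestore(&pdflush_lock, flags);   /* single unlock */
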
@@ -218,20 +233,34 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
                pdf->fn = fn;
                pdf->arg0 = arg0;
                wake_up_process(pdf->who);
-               spin_unlock_irqrestore(&pdflush_lock, flags);
        }
+       spin_unlock_irqrestore(&pdflush_lock, flags);
+
        return ret;
 }
 
 static void start_one_pdflush_thread(void)
 {
-       kthread_run(pdflush, NULL, "pdflush");
+       struct task_struct *k;
+
+       k = kthread_run(pdflush, NULL, "pdflush");
+       if (unlikely(IS_ERR(k))) {
+               spin_lock_irq(&pdflush_lock);
+               nr_pdflush_threads--;
+               spin_unlock_irq(&pdflush_lock);
+       }
 }
 
 static int __init pdflush_init(void)
 {
        int i;
 
+       /*
+        * Pre-set nr_pdflush_threads...  If we fail to create,
+        * the count will be decremented.
+        */
+       nr_pdflush_threads = MIN_PDFLUSH_THREADS;
+
        for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
                start_one_pdflush_thread();
        return 0;
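
The tail of the patch pairs the bookkeeping: pdflush_init() charges
nr_pdflush_threads up front, and start_one_pdflush_thread() un-charges it if
thread creation fails. Note that kthread_run() reports failure through an
ERR_PTR()-encoded pointer, never NULL, hence the IS_ERR() test. The idiom,
sketched with a hypothetical thread function:

    #include <linux/kthread.h>
    #include <linux/err.h>

    struct task_struct *task;

    task = kthread_run(my_worker, NULL, "my-worker");
    if (IS_ERR(task)) {
            /* decode the errno, e.g. -ENOMEM or -EINTR */
            printk(KERN_WARNING "thread creation failed: %ld\n",
                   PTR_ERR(task));
    }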