diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c592965..492c986 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
 int sysctl_panic_on_oom;
 int sysctl_oom_kill_allocating_task;
 int sysctl_oom_dump_tasks;
-static DEFINE_SPINLOCK(zone_scan_mutex);
+static DEFINE_SPINLOCK(zone_scan_lock);
 /* #define DEBUG */
 
+/*
+ * Does any thread of the target process have mems_allowed that overlaps ours?
+ */
+static int has_intersects_mems_allowed(struct task_struct *tsk)
+{
+       struct task_struct *t;
+
+       t = tsk;
+       do {
+               if (cpuset_mems_allowed_intersects(current, t))
+                       return 1;
+               t = next_thread(t);
+       } while (t != tsk);
+
+       return 0;
+}
+
 /**
  * badness - calculate a numeric value for how bad this task has been
  * @p: task struct of which task we should calculate
@@ -55,9 +72,16 @@ static DEFINE_SPINLOCK(zone_scan_mutex);
 
 unsigned long badness(struct task_struct *p, unsigned long uptime)
 {
-       unsigned long points, cpu_time, run_time, s;
+       unsigned long points, cpu_time, run_time;
        struct mm_struct *mm;
        struct task_struct *child;
+       int oom_adj = p->signal->oom_adj;
+       struct task_cputime task_time;
+       unsigned long utime;
+       unsigned long stime;
+
+       if (oom_adj == OOM_DISABLE)
+               return 0;
 
        task_lock(p);
        mm = p->mm;
@@ -79,7 +103,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
        /*
         * swapoff can easily use up all memory, so kill those first.
         */
-       if (p->flags & PF_SWAPOFF)
+       if (p->flags & PF_OOM_ORIGIN)
                return ULONG_MAX;
 
        /*
@@ -102,20 +126,21 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
          * of seconds. There is no particular reason for this other than
          * that it turned out to work very well in practice.
         */
-       cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
-               >> (SHIFT_HZ + 3);
+       thread_group_cputime(p, &task_time);
+       utime = cputime_to_jiffies(task_time.utime);
+       stime = cputime_to_jiffies(task_time.stime);
+       cpu_time = (utime + stime) >> (SHIFT_HZ + 3);
+
 
        if (uptime >= p->start_time.tv_sec)
                run_time = (uptime - p->start_time.tv_sec) >> 10;
        else
                run_time = 0;
 
-       s = int_sqrt(cpu_time);
-       if (s)
-               points /= s;
-       s = int_sqrt(int_sqrt(run_time));
-       if (s)
-               points /= s;
+       if (cpu_time)
+               points /= int_sqrt(cpu_time);
+       if (run_time)
+               points /= int_sqrt(int_sqrt(run_time));
 
        /*
         * Niced processes are most likely less important, so double
@@ -146,19 +171,19 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
         * because p may have allocated or otherwise mapped memory on
         * this node before. However it will be less likely.
         */
-       if (!cpuset_mems_allowed_intersects(current, p))
+       if (!has_intersects_mems_allowed(p))
                points /= 8;
 
        /*
-        * Adjust the score by oomkilladj.
+        * Adjust the score by oom_adj.
         */
-       if (p->oomkilladj) {
-               if (p->oomkilladj > 0) {
+       if (oom_adj) {
+               if (oom_adj > 0) {
                        if (!points)
                                points = 1;
-                       points <<= p->oomkilladj;
+                       points <<= oom_adj;
                } else
-                       points >>= -(p->oomkilladj);
+                       points >>= -(oom_adj);
        }
 
 #ifdef DEBUG
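
The two hunks above keep the existing badness() heuristic but restructure it: CPU time is now summed over the whole thread group (still scaled down by SHIFT_HZ + 3), the points are damped by int_sqrt(cpu_time) and int_sqrt(int_sqrt(run_time)), and oom_adj finally scales the result as a bit shift, so each step of oom_adj doubles or halves the score. A minimal userspace sketch of that arithmetic, assuming cpu_time and run_time have already been scaled as above and using a hypothetical isqrt() in place of the kernel's int_sqrt():

#include <stdio.h>

/* hypothetical stand-in for the kernel's int_sqrt() */
static unsigned long isqrt(unsigned long x)
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= x)
                r++;
        return r;
}

/* damping and oom_adj scaling as applied by badness() above */
static unsigned long scale_points(unsigned long points, unsigned long cpu_time,
                                  unsigned long run_time, int oom_adj)
{
        if (cpu_time)
                points /= isqrt(cpu_time);
        if (run_time)
                points /= isqrt(isqrt(run_time));

        if (oom_adj) {
                if (oom_adj > 0) {
                        if (!points)
                                points = 1;
                        points <<= oom_adj;     /* each +1 doubles the score */
                } else
                        points >>= -(oom_adj);  /* each -1 halves the score */
        }
        return points;
}

int main(void)
{
        /* e.g. 100000 mapped pages, some CPU and run time, oom_adj = +2 */
        printf("%lu\n", scale_points(100000, 400, 5000, 2));
        return 0;
}

With those example values the 100000 raw points are damped to 625 and then shifted to 2500 by the oom_adj of +2.
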
@@ -202,13 +227,13 @@ static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 static struct task_struct *select_bad_process(unsigned long *ppoints,
                                                struct mem_cgroup *mem)
 {
-       struct task_struct *g, *p;
+       struct task_struct *p;
        struct task_struct *chosen = NULL;
        struct timespec uptime;
        *ppoints = 0;
 
        do_posix_clock_monotonic_gettime(&uptime);
-       do_each_thread(g, p) {
+       for_each_process(p) {
                unsigned long points;
 
                /*
@@ -253,7 +278,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
                        *ppoints = ULONG_MAX;
                }
 
-               if (p->oomkilladj == OOM_DISABLE)
+               if (p->signal->oom_adj == OOM_DISABLE)
                        continue;
 
                points = badness(p, uptime.tv_sec);
@@ -261,7 +286,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
                        chosen = p;
                        *ppoints = points;
                }
-       } while_each_thread(g, p);
+       }
 
        return chosen;
 }
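
Because oom_adj now lives in signal_struct and is shared by every thread of a process, and badness() above already charges thread-group CPU time, select_bad_process() can walk process leaders with for_each_process() instead of visiting every thread. The selection itself is just an arg-max over badness scores; a minimal userspace sketch of that loop, with a hypothetical candidate struct standing in for task_struct:

#include <stdio.h>

/* hypothetical stand-in for a task and its precomputed badness score */
struct candidate {
        const char *comm;
        unsigned long points;
};

/* keep the highest-scoring candidate, as select_bad_process() does */
static const struct candidate *pick_victim(const struct candidate *c, int n,
                                           unsigned long *ppoints)
{
        const struct candidate *chosen = NULL;
        int i;

        *ppoints = 0;
        for (i = 0; i < n; i++) {
                if (c[i].points > *ppoints || !chosen) {
                        chosen = &c[i];
                        *ppoints = c[i].points;
                }
        }
        return chosen;
}

int main(void)
{
        static const struct candidate tasks[] = {
                { "sshd", 120 }, { "postgres", 54000 }, { "bash", 300 },
        };
        unsigned long points;
        const struct candidate *victim = pick_victim(tasks, 3, &points);

        printf("%s (%lu points)\n", victim->comm, points);
        return 0;
}

The real function additionally skips tasks whose oom_adj is OOM_DISABLE before scoring, as the hunk above shows.
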
@@ -286,26 +311,47 @@ static void dump_tasks(const struct mem_cgroup *mem)
        printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
               "name\n");
        do_each_thread(g, p) {
-               /*
-                * total_vm and rss sizes do not exist for tasks with a
-                * detached mm so there's no need to report them.
-                */
-               if (!p->mm)
-                       continue;
+               struct mm_struct *mm;
+
                if (mem && !task_in_mem_cgroup(p, mem))
                        continue;
                if (!thread_group_leader(p))
                        continue;
 
                task_lock(p);
+               mm = p->mm;
+               if (!mm) {
+                       /*
+                        * total_vm and rss sizes do not exist for tasks with no
+                        * mm so there's no need to report them; they can't be
+                        * oom killed anyway.
+                        */
+                       task_unlock(p);
+                       continue;
+               }
                printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d     %3d %s\n",
-                      p->pid, __task_cred(p)->uid, p->tgid,
-                      p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p),
-                      p->oomkilladj, p->comm);
+                      p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
+                      get_mm_rss(mm), (int)task_cpu(p), p->signal->oom_adj,
+                      p->comm);
                task_unlock(p);
        } while_each_thread(g, p);
 }
 
+static void dump_header(gfp_t gfp_mask, int order, struct mem_cgroup *mem)
+{
+       pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
+               "oom_adj=%d\n",
+               current->comm, gfp_mask, order, current->signal->oom_adj);
+       task_lock(current);
+       cpuset_print_task_mems_allowed(current);
+       task_unlock(current);
+       dump_stack();
+       mem_cgroup_print_oom_info(mem, current);
+       show_mem();
+       if (sysctl_oom_dump_tasks)
+               dump_tasks(mem);
+}
+
 /*
  * Send SIGKILL to the selected  process irrespective of  CAP_SYS_RAW_IO
  * flag though it's unlikely that  we select a process with CAP_SYS_RAW_IO
@@ -342,11 +388,6 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
 
 static int oom_kill_task(struct task_struct *p)
 {
-       struct mm_struct *mm;
-       struct task_struct *g, *q;
-
-       mm = p->mm;
-
        /* WARNING: mm may not be dereferenced since we did not obtain its
         * value from get_task_mm(p).  This is OK since all we need to do is
         * compare mm to q->mm below.
@@ -355,30 +396,11 @@ static int oom_kill_task(struct task_struct *p)
         * change to NULL at any time since we do not hold task_lock(p).
         * However, this is of no concern to us.
         */
-
-       if (mm == NULL)
+       if (!p->mm || p->signal->oom_adj == OOM_DISABLE)
                return 1;
 
-       /*
-        * Don't kill the process if any threads are set to OOM_DISABLE
-        */
-       do_each_thread(g, q) {
-               if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
-                       return 1;
-       } while_each_thread(g, q);
-
        __oom_kill_task(p, 1);
 
-       /*
-        * kill all processes that share the ->mm (i.e. all threads),
-        * but are in a different thread group. Don't let them have access
-        * to memory reserves though, otherwise we might deplete all memory.
-        */
-       do_each_thread(g, q) {
-               if (q->mm == mm && !same_thread_group(q, p))
-                       force_sig(SIGKILL, q);
-       } while_each_thread(g, q);
-
        return 0;
 }
 
@@ -388,15 +410,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 {
        struct task_struct *c;
 
-       if (printk_ratelimit()) {
-               printk(KERN_WARNING "%s invoked oom-killer: "
-                       "gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
-                       current->comm, gfp_mask, order, current->oomkilladj);
-               dump_stack();
-               show_mem();
-               if (sysctl_oom_dump_tasks)
-                       dump_tasks(mem);
-       }
+       if (printk_ratelimit())
+               dump_header(gfp_mask, order, mem);
 
        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
@@ -426,7 +441,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
        unsigned long points = 0;
        struct task_struct *p;
 
-       cgroup_lock();
        read_lock(&tasklist_lock);
 retry:
        p = select_bad_process(&points, mem);
@@ -441,7 +455,6 @@ retry:
                goto retry;
 out:
        read_unlock(&tasklist_lock);
-       cgroup_unlock();
 }
 #endif
 
@@ -470,7 +483,7 @@ int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
        struct zone *zone;
        int ret = 1;
 
-       spin_lock(&zone_scan_mutex);
+       spin_lock(&zone_scan_lock);
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
                if (zone_is_oom_locked(zone)) {
                        ret = 0;
@@ -480,7 +493,7 @@ int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
                /*
-                * Lock each zone in the zonelist under zone_scan_mutex so a
+                * Lock each zone in the zonelist under zone_scan_lock so a
                 * parallel invocation of try_set_zone_oom() doesn't succeed
                 * when it shouldn't.
                 */
@@ -488,7 +501,7 @@ int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
        }
 
 out:
-       spin_unlock(&zone_scan_mutex);
+       spin_unlock(&zone_scan_lock);
        return ret;
 }
 
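try_set_zone_oom() and clear_zonelist_oom() only see a rename here: zone_scan_mutex was already a spinlock (DEFINE_SPINLOCK above), so zone_scan_lock is the more honest name. The pattern itself is worth spelling out: under the one lock, first check that no zone in the zonelist is already marked ZONE_OOM_LOCKED and bail out if one is, then mark them all, so parallel OOM kills on overlapping zonelists serialize. A minimal userspace sketch of that check-then-mark pattern, with hypothetical zone ids and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

#define NR_ZONES 4

static int zone_oom_locked[NR_ZONES];   /* stand-in for ZONE_OOM_LOCKED */
static pthread_mutex_t zone_scan_lock = PTHREAD_MUTEX_INITIALIZER;

/* claim every zone in the list, or fail if any is already claimed */
static int try_set_zones_oom(const int *zones, int n)
{
        int i, ret = 1;

        pthread_mutex_lock(&zone_scan_lock);
        for (i = 0; i < n; i++)
                if (zone_oom_locked[zones[i]]) {
                        ret = 0;
                        goto out;
                }
        for (i = 0; i < n; i++)
                zone_oom_locked[zones[i]] = 1;
out:
        pthread_mutex_unlock(&zone_scan_lock);
        return ret;
}

static void clear_zones_oom(const int *zones, int n)
{
        int i;

        pthread_mutex_lock(&zone_scan_lock);
        for (i = 0; i < n; i++)
                zone_oom_locked[zones[i]] = 0;
        pthread_mutex_unlock(&zone_scan_lock);
}

int main(void)
{
        int zonelist[] = { 0, 1 };

        if (try_set_zones_oom(zonelist, 2)) {
                /* an overlapping claim must fail while we hold the zones */
                printf("overlapping claim: %d\n",
                       try_set_zones_oom(zonelist, 2));
                clear_zones_oom(zonelist, 2);
        }
        return 0;
}

The overlapping claim prints 0 because the first caller still holds the zones; clear_zones_oom() plays the role of clear_zonelist_oom() in the hunk below.
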
@@ -502,11 +515,11 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
        struct zoneref *z;
        struct zone *zone;
 
-       spin_lock(&zone_scan_mutex);
+       spin_lock(&zone_scan_lock);
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
                zone_clear_flag(zone, ZONE_OOM_LOCKED);
        }
-       spin_unlock(&zone_scan_mutex);
+       spin_unlock(&zone_scan_lock);
 }
 
 /*
@@ -514,34 +527,33 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
  */
 static void __out_of_memory(gfp_t gfp_mask, int order)
 {
-       if (sysctl_oom_kill_allocating_task) {
-               oom_kill_process(current, gfp_mask, order, 0, NULL,
-                               "Out of memory (oom_kill_allocating_task)");
-
-       } else {
-               unsigned long points;
-               struct task_struct *p;
-
-retry:
-               /*
-                * Rambo mode: Shoot down a process and hope it solves whatever
-                * issues we may have.
-                */
-               p = select_bad_process(&points, NULL);
+       struct task_struct *p;
+       unsigned long points;
 
-               if (PTR_ERR(p) == -1UL)
+       if (sysctl_oom_kill_allocating_task)
+               if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
+                               "Out of memory (oom_kill_allocating_task)"))
                        return;
+retry:
+       /*
+        * Rambo mode: Shoot down a process and hope it solves whatever
+        * issues we may have.
+        */
+       p = select_bad_process(&points, NULL);
 
-               /* Found nothing?!?! Either we hang forever, or we panic. */
-               if (!p) {
-                       read_unlock(&tasklist_lock);
-                       panic("Out of memory and no killable processes...\n");
-               }
+       if (PTR_ERR(p) == -1UL)
+               return;
 
-               if (oom_kill_process(p, gfp_mask, order, points, NULL,
-                                    "Out of memory"))
-                       goto retry;
+       /* Found nothing?!?! Either we hang forever, or we panic. */
+       if (!p) {
+               read_unlock(&tasklist_lock);
+               dump_header(gfp_mask, order, NULL);
+               panic("Out of memory and no killable processes...\n");
        }
+
+       if (oom_kill_process(p, gfp_mask, order, points, NULL,
+                            "Out of memory"))
+               goto retry;
 }
 
 /*
@@ -557,6 +569,13 @@ void pagefault_out_of_memory(void)
                /* Got some memory back in the last second. */
                return;
 
+       /*
+        * If this is from memcg, the oom killer has already been invoked,
+        * so it is not worth going system-wide OOM.
+        */
+       if (mem_cgroup_oom_called(current))
+               goto rest_and_return;
+
        if (sysctl_panic_on_oom)
                panic("out of memory from page fault. panic_on_oom is selected.\n");
 
@@ -568,6 +587,7 @@ void pagefault_out_of_memory(void)
         * Give "p" a good chance of killing itself before we
         * retry to allocate memory.
         */
+rest_and_return:
        if (!test_thread_flag(TIF_MEMDIE))
                schedule_timeout_uninterruptible(1);
 }
@@ -593,8 +613,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
                /* Got some memory back in the last second. */
                return;
 
-       if (sysctl_panic_on_oom == 2)
+       if (sysctl_panic_on_oom == 2) {
+               dump_header(gfp_mask, order, NULL);
                panic("out of memory. Compulsory panic_on_oom is selected.\n");
+       }
 
        /*
         * Check if there were limitations on the allocation (only relevant for
@@ -610,8 +632,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
                break;
 
        case CONSTRAINT_NONE:
-               if (sysctl_panic_on_oom)
+               if (sysctl_panic_on_oom) {
+                       dump_header(gfp_mask, order, NULL);
                        panic("out of memory. panic_on_oom is selected\n");
+               }
                /* Fall-through */
        case CONSTRAINT_CPUSET:
                __out_of_memory(gfp_mask, order);