#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
+#include <linux/security.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
+int sysctl_oom_dump_tasks;
static DEFINE_SPINLOCK(zone_scan_mutex);
/* #define DEBUG */
* of least surprise ... (be careful when you change it)
*/
-unsigned long badness(struct task_struct *p, unsigned long uptime,
- struct mem_cgroup *mem)
+unsigned long badness(struct task_struct *p, unsigned long uptime)
{
unsigned long points, cpu_time, run_time, s;
struct mm_struct *mm;
* Superuser processes are usually more important, so we make it
* less likely that we kill those.
*/
- if (__capable(p, CAP_SYS_ADMIN) || __capable(p, CAP_SYS_RESOURCE))
+ if (has_capability(p, CAP_SYS_ADMIN) ||
+ has_capability(p, CAP_SYS_RESOURCE))
points /= 4;
/*
* tend to only have this flag set on applications they think
* of as important.
*/
- if (__capable(p, CAP_SYS_RAWIO))
+ if (has_capability(p, CAP_SYS_RAWIO))
points /= 4;
/*
gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
- struct zone **z;
+ struct zone *zone;
+ struct zoneref *z;
+ enum zone_type high_zoneidx = gfp_zone(gfp_mask);
nodemask_t nodes = node_states[N_HIGH_MEMORY];
- for (z = zonelist->zones; *z; z++)
- if (cpuset_zone_allowed_softwall(*z, gfp_mask))
- node_clear(zone_to_nid(*z), nodes);
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
+ if (cpuset_zone_allowed_softwall(zone, gfp_mask))
+ node_clear(zone_to_nid(zone), nodes);
else
return CONSTRAINT_CPUSET;
if (p->oomkilladj == OOM_DISABLE)
continue;
- points = badness(p, uptime.tv_sec, mem);
+ points = badness(p, uptime.tv_sec);
if (points > *ppoints || !chosen) {
chosen = p;
*ppoints = points;
}
/**
+ * dump_tasks - dump current memory state of all system tasks
+ * @mem: target memory controller
+ *
+ * Dumps the current memory state of all system tasks, excluding kernel threads.
+ * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
+ * score, and name.
+ *
+ * If @mem is non-NULL, only tasks that are members of that mem_cgroup are
+ * shown.
+ *
+ * Call with tasklist_lock read-locked.
+ */
+static void dump_tasks(const struct mem_cgroup *mem)
+{
+	struct task_struct *g, *p;
+
+	printk(KERN_INFO "[ pid ] uid tgid total_vm rss cpu oom_adj "
+	       "name\n");
+	do_each_thread(g, p) {
+		struct mm_struct *mm;
+
+		/* When a memory controller is given, report only its members. */
+		if (mem && !task_in_mem_cgroup(p, mem))
+			continue;
+		/* One line per thread group, not per thread. */
+		if (!thread_group_leader(p))
+			continue;
+
+		/*
+		 * p->mm must be sampled under task_lock(): testing it before
+		 * taking the lock races with the task detaching its mm on
+		 * exit and can lead to a NULL dereference below.  Tasks with
+		 * a detached mm have no total_vm/rss to report anyway.
+		 */
+		task_lock(p);
+		mm = p->mm;
+		if (!mm) {
+			task_unlock(p);
+			continue;
+		}
+		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
+		       p->pid, p->uid, p->tgid, mm->total_vm,
+		       get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj,
+		       p->comm);
+		task_unlock(p);
+	} while_each_thread(g, p);
+}
+
+/*
* Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO
* flag though it's unlikely that we select a process with CAP_SYS_RAW_IO
* set.
}
static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
- unsigned long points, const char *message)
+ unsigned long points, struct mem_cgroup *mem,
+ const char *message)
{
struct task_struct *c;
current->comm, gfp_mask, order, current->oomkilladj);
dump_stack();
show_mem();
+ if (sysctl_oom_dump_tasks)
+ dump_tasks(mem);
}
/*
return oom_kill_task(p);
}
-#ifdef CONFIG_CGROUP_MEM_CONT
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
unsigned long points = 0;
struct task_struct *p;
cgroup_lock();
- rcu_read_lock();
+ read_lock(&tasklist_lock);
retry:
p = select_bad_process(&points, mem);
if (PTR_ERR(p) == -1UL)
if (!p)
p = current;
- if (oom_kill_process(p, gfp_mask, 0, points,
+ if (oom_kill_process(p, gfp_mask, 0, points, mem,
"Memory cgroup out of memory"))
goto retry;
out:
- rcu_read_unlock();
+ read_unlock(&tasklist_lock);
cgroup_unlock();
}
#endif
* if a parallel OOM killing is already taking place that includes a zone in
* the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
*/
-int try_set_zone_oom(struct zonelist *zonelist)
+int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
- struct zone **z;
+ struct zoneref *z;
+ struct zone *zone;
int ret = 1;
- z = zonelist->zones;
-
spin_lock(&zone_scan_mutex);
- do {
- if (zone_is_oom_locked(*z)) {
+ for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+ if (zone_is_oom_locked(zone)) {
ret = 0;
goto out;
}
- } while (*(++z) != NULL);
+ }
+
+ for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+ /*
+ * Lock each zone in the zonelist under zone_scan_mutex so a
+ * parallel invocation of try_set_zone_oom() doesn't succeed
+ * when it shouldn't.
+ */
+ zone_set_flag(zone, ZONE_OOM_LOCKED);
+ }
- /*
- * Lock each zone in the zonelist under zone_scan_mutex so a parallel
- * invocation of try_set_zone_oom() doesn't succeed when it shouldn't.
- */
- z = zonelist->zones;
- do {
- zone_set_flag(*z, ZONE_OOM_LOCKED);
- } while (*(++z) != NULL);
out:
spin_unlock(&zone_scan_mutex);
return ret;
* allocation attempts with zonelists containing them may now recall the OOM
* killer, if necessary.
*/
-void clear_zonelist_oom(struct zonelist *zonelist)
+void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
-	struct zone **z;
-
-	z = zonelist->zones;
+	struct zoneref *z;
+	struct zone *zone;
	spin_lock(&zone_scan_mutex);
-	do {
-		zone_clear_flag(*z, ZONE_OOM_LOCKED);
-	} while (*(++z) != NULL);
+	/*
+	 * Drop ZONE_OOM_LOCKED, set by try_set_zone_oom(), on every zone in
+	 * the zonelist eligible for gfp_zone(gfp_mask), so allocations over
+	 * this zonelist may invoke the OOM killer again.  Done under
+	 * zone_scan_mutex to pair with the locking in try_set_zone_oom().
+	 */
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+		zone_clear_flag(zone, ZONE_OOM_LOCKED);
+	}
	spin_unlock(&zone_scan_mutex);
}
/**
* out_of_memory - kill the "best" process when we run out of memory
+ * @zonelist: zonelist pointer
+ * @gfp_mask: memory allocation flags
+ * @order: amount of memory being requested as a power of 2
*
* If we run out of memory, we have the choice between either
* killing a random task (bad), letting the system crash (worse)
switch (constraint) {
case CONSTRAINT_MEMORY_POLICY:
- oom_kill_process(current, gfp_mask, order, points,
+ oom_kill_process(current, gfp_mask, order, points, NULL,
"No available memory (MPOL_BIND)");
break;
/* Fall-through */
case CONSTRAINT_CPUSET:
if (sysctl_oom_kill_allocating_task) {
- oom_kill_process(current, gfp_mask, order, points,
+ oom_kill_process(current, gfp_mask, order, points, NULL,
"Out of memory (oom_kill_allocating_task)");
break;
}
panic("Out of memory and no killable processes...\n");
}
- if (oom_kill_process(p, gfp_mask, order, points,
+ if (oom_kill_process(p, gfp_mask, order, points, NULL,
"Out of memory"))
goto retry;