+#define mem_cgroup_from_res_counter(counter, member) \
+ container_of(counter, struct mem_cgroup, member)
+
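+/*
+ * Return true if @mem is under its limit. With swap accounting
+ * (do_swap_account) enabled, both the memory counter and the
+ * mem+swap counter must be under their limits.
+ */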
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+{
+ if (do_swap_account) {
+ if (res_counter_check_under_limit(&mem->res) &&
+ res_counter_check_under_limit(&mem->memsw))
+ return true;
+ } else if (res_counter_check_under_limit(&mem->res))
+ return true;
+ return false;
+}
+
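+/*
+ * Read the per-memcg swappiness under reclaim_param_lock. The root
+ * cgroup has no per-memcg setting and uses the global vm_swappiness.
+ */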
+static unsigned int get_swappiness(struct mem_cgroup *memcg)
+{
+ struct cgroup *cgrp = memcg->css.cgroup;
+ unsigned int swappiness;
+
+ /* root ? */
+ if (cgrp->parent == NULL)
+ return vm_swappiness;
+
+ spin_lock(&memcg->reclaim_param_lock);
+ swappiness = memcg->swappiness;
+ spin_unlock(&memcg->reclaim_param_lock);
+
+ return swappiness;
+}
+
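+/* Callback for mem_cgroup_walk_tree(): count each memcg visited. */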
+static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
+{
+ int *val = data;
+ (*val)++;
+ return 0;
+}
+
+/**
+ * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
+ * @memcg: The memory cgroup that went over its limit
+ * @p: Task that is going to be killed
+ *
+ * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
+ * enabled.
+ */
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+{
+ struct cgroup *task_cgrp;
+ struct cgroup *mem_cgrp;
+ /*
+ * Need a buffer in BSS, can't rely on allocations. The code relies
+ * on the assumption that OOM is serialized for the memory controller.
+ * If this assumption is broken, revisit this code.
+ */
+ static char memcg_name[PATH_MAX];
+ int ret;
+
+ if (!memcg)
+ return;
+
+ rcu_read_lock();
+
+ mem_cgrp = memcg->css.cgroup;
+ task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
+
+ ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
+ if (ret < 0) {
+ /*
+ * Unfortunately, we are unable to convert to a useful name,
+ * but we'll still print out the usage information.
+ */
+ rcu_read_unlock();
+ goto done;
+ }
+ rcu_read_unlock();
+
+ printk(KERN_INFO "Task in %s killed", memcg_name);
+
+ rcu_read_lock();
+ ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
+ if (ret < 0) {
+ rcu_read_unlock();
+ /* Terminate the unfinished console line started above */
+ printk(KERN_CONT "\n");
+ goto done;
+ }
+ rcu_read_unlock();
+
+ /*
+ * Continues the line above, so we don't need a KERN_ level
+ */
+ printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
+done:
+
+ printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
+ res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
+ res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
+ res_counter_read_u64(&memcg->res, RES_FAILCNT));
+ printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
+ "failcnt %llu\n",
+ res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
+ res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
+ res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
+}
+
+/*
+ * Return the number of memcgs in the hierarchy tree rooted at @mem,
+ * including @mem itself, i.e. 1 (the self count) when there are no
+ * children.
+ */
+static int mem_cgroup_count_children(struct mem_cgroup *mem)
+{
+ int num = 0;
+ mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+ return num;
+}
+
+/*
+ * Pick the next child of @root_mem to reclaim pages from. This need not be
+ * the first child in cgroup-list order: we resume from last_scanned_child,
+ * so repeated calls rotate through the children.
+ */
+static struct mem_cgroup *
+mem_cgroup_select_victim(struct mem_cgroup *root_mem)
+{
+ struct mem_cgroup *ret = NULL;
+ struct cgroup_subsys_state *css;
+ int nextid, found;
+
+ if (!root_mem->use_hierarchy) {
+ css_get(&root_mem->css);
+ ret = root_mem;
+ }
+
+ while (!ret) {
+ rcu_read_lock();
+ nextid = root_mem->last_scanned_child + 1;
+ css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
+ &found);
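+ /*
+ * css_tryget() fails if this group is being removed; in that
+ * case we loop and try the next child.
+ */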
+ if (css && css_tryget(css))
+ ret = container_of(css, struct mem_cgroup, css);
+
+ rcu_read_unlock();
+ /* Update the scanning parameter */
+ spin_lock(&root_mem->reclaim_param_lock);
+ if (!css) {
+ /* searched all children: next scan starts from ID 1 */
+ root_mem->last_scanned_child = 0;
+ } else
+ root_mem->last_scanned_child = found;
+ spin_unlock(&root_mem->reclaim_param_lock);
+ }
+
+ return ret;
+}
+
+/*
+ * Scan the hierarchy if needed to reclaim memory. We remember the last child
+ * we reclaimed from, so that we don't end up penalizing one child extensively
+ * based on its position in the children list.
+ *
+ * root_mem is the original ancestor that we've been reclaiming from.
+ *
+ * We give up and return to the caller when we visit root_mem twice.
+ * (Other groups can be removed while we're walking.)
+ *
+ * If shrink==true, this returns immediately after one round of reclaim,
+ * to avoid freeing too much.
+ *
+ * Returns the total number of pages reclaimed; 1 + total is returned as
+ * soon as root_mem drops back under its limit.
+ */
+static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
+ gfp_t gfp_mask, bool noswap, bool shrink)
+{
+ struct mem_cgroup *victim;
+ int ret, total = 0;
+ int loop = 0;
+
+ while (loop < 2) {
+ victim = mem_cgroup_select_victim(root_mem);
+ if (victim == root_mem)
+ loop++;
+ if (!mem_cgroup_local_usage(&victim->stat)) {
+ /* this cgroup's local usage == 0 */
+ css_put(&victim->css);
+ continue;
+ }
+ /* we use swappiness of local cgroup */
+ ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
+ get_swappiness(victim));
+ css_put(&victim->css);
+ /*
+ * When shrinking usage, we can't tell here whether we should stop
+ * or reclaim more; that depends on the caller. last_scanned_child
+ * is enough to keep the scan fair across the tree.
+ */
+ if (shrink)
+ return ret;
+ total += ret;
+ if (mem_cgroup_check_under_limit(root_mem))
+ return 1 + total;
+ }
+ return total;
+}
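+
+/*
+ * Illustrative caller sketch (hypothetical, not part of this patch): a
+ * charge path that failed against mem_over_limit's counter could retry
+ * after reclaiming, e.g.
+ *
+ *	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
+ *					      noswap, false);
+ *	if (ret)
+ *		continue;	(progress made or back under limit; retry charge)
+ *
+ * where mem_over_limit and noswap are assumed caller-side values.
+ */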
+
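+/*
+ * Return true if @task's memcg (or the memcg owning init_mm, for tasks
+ * without an mm) recorded an OOM within the last HZ/10 jiffies, i.e.
+ * roughly the last 100ms.
+ */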
+bool mem_cgroup_oom_called(struct task_struct *task)
+{
+ bool ret = false;
+ struct mem_cgroup *mem;
+ struct mm_struct *mm;
+
+ rcu_read_lock();
+ mm = task->mm;
+ if (!mm)
+ mm = &init_mm;
+ mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
+ ret = true;
+ rcu_read_unlock();
+ return ret;
+}
+
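+/* Callback for mem_cgroup_walk_tree(): stamp the OOM time on each memcg. */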
+static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
+{
+ mem->last_oom_jiffies = jiffies;
+ return 0;
+}
+
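+/* Record the OOM timestamp on @mem and every memcg below it. */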
+static void record_last_oom(struct mem_cgroup *mem)
+{
+ mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
+}
+