memcg: swapout refcnt fix
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8ce4e9e..964a700 100644
@@ -143,6 +143,17 @@ struct mem_cgroup {
        struct mem_cgroup_lru_info info;
 
        int     prev_priority;  /* for recording reclaim priority */
+
+       /*
+        * While reclaiming in a hierarchy, we cache the last child we
+        * reclaimed from. Protected by cgroup_lock()
+        */
+       struct mem_cgroup *last_scanned_child;
+       /*
+        * Should the accounting and control be hierarchical, per subtree?
+        */
+       bool use_hierarchy;
+       unsigned long   last_oom_jiffies;
        int             obsolete;
        atomic_t        refcnt;
        /*
@@ -279,7 +290,7 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
        struct mem_cgroup *mem;
        struct mem_cgroup_per_zone *mz;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        /* can happen while we handle swapcache. */
@@ -302,7 +313,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
        struct mem_cgroup_per_zone *mz;
        struct page_cgroup *pc;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
 
        pc = lookup_page_cgroup(page);
@@ -319,7 +330,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        /* barrier to sync with "charge" */
@@ -344,7 +355,7 @@ static void mem_cgroup_lru_fixup(struct page *page)
 void mem_cgroup_move_lists(struct page *page,
                           enum lru_list from, enum lru_list to)
 {
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
        mem_cgroup_del_lru_list(page, from);
        mem_cgroup_add_lru_list(page, to);
@@ -461,6 +472,177 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
        return nr_taken;
 }
 
+#define mem_cgroup_from_res_counter(counter, member)   \
+       container_of(counter, struct mem_cgroup, member)
+
+/*
+ * Find the next node in a depth-first (DFS) walk of the hierarchy.
+ * Must be called with cgroup_mutex held.
+ */
+static struct mem_cgroup *
+mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+{
+       struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
+
+       curr_cgroup = curr->css.cgroup;
+       root_cgroup = root_mem->css.cgroup;
+
+       if (!list_empty(&curr_cgroup->children)) {
+               /*
+                * Walk down to children
+                */
+               mem_cgroup_put(curr);
+               cgroup = list_entry(curr_cgroup->children.next,
+                                               struct cgroup, sibling);
+               curr = mem_cgroup_from_cont(cgroup);
+               mem_cgroup_get(curr);
+               goto done;
+       }
+
+visit_parent:
+       if (curr_cgroup == root_cgroup) {
+               mem_cgroup_put(curr);
+               curr = root_mem;
+               mem_cgroup_get(curr);
+               goto done;
+       }
+
+       /*
+        * Go to the next sibling
+        */
+       if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
+               mem_cgroup_put(curr);
+               cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
+                                               sibling);
+               curr = mem_cgroup_from_cont(cgroup);
+               mem_cgroup_get(curr);
+               goto done;
+       }
+
+       /*
+        * Go up to next parent and next parent's sibling if need be
+        */
+       curr_cgroup = curr_cgroup->parent;
+       goto visit_parent;
+
+done:
+       root_mem->last_scanned_child = curr;
+       return curr;
+}
+
+/*
+ * Pick a child of @root_mem to reclaim pages from. It need not be the
+ * first child in the cgroup list, since we track last_scanned_child and
+ * resume the walk from there.
+ */
+static struct mem_cgroup *
+mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
+{
+       struct cgroup *cgroup;
+       struct mem_cgroup *ret;
+       bool obsolete = (root_mem->last_scanned_child &&
+                               root_mem->last_scanned_child->obsolete);
+
+       /*
+        * Scan the children under root_mem
+        */
+       cgroup_lock();
+       if (list_empty(&root_mem->css.cgroup->children)) {
+               ret = root_mem;
+               goto done;
+       }
+
+       if (!root_mem->last_scanned_child || obsolete) {
+
+               if (obsolete)
+                       mem_cgroup_put(root_mem->last_scanned_child);
+
+               cgroup = list_first_entry(&root_mem->css.cgroup->children,
+                               struct cgroup, sibling);
+               ret = mem_cgroup_from_cont(cgroup);
+               mem_cgroup_get(ret);
+       } else
+               ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
+                                               root_mem);
+
+done:
+       root_mem->last_scanned_child = ret;
+       cgroup_unlock();
+       return ret;
+}
+
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+{
+       if (do_swap_account) {
+               if (res_counter_check_under_limit(&mem->res) &&
+                       res_counter_check_under_limit(&mem->memsw))
+                       return true;
+       } else
+               if (res_counter_check_under_limit(&mem->res))
+                       return true;
+       return false;
+}
+
+/*
+ * Dance down the hierarchy if needed to reclaim memory. We remember the
+ * last child we reclaimed from, so that we don't end up penalizing
+ * one child extensively based on its position in the children list.
+ *
+ * root_mem is the original ancestor that we've been reclaiming from.
+ */
+static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
+                                               gfp_t gfp_mask, bool noswap)
+{
+       struct mem_cgroup *next_mem;
+       int ret = 0;
+
+       /*
+        * Reclaim unconditionally and don't check for return value.
+        * We need to reclaim in the current group and down the tree.
+        * One might think about checking for children before reclaiming,
+        * but there might be left over accounting, even after children
+        * have left.
+        */
+       ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
+       if (mem_cgroup_check_under_limit(root_mem))
+               return 0;
+
+       next_mem = mem_cgroup_get_first_node(root_mem);
+
+       while (next_mem != root_mem) {
+               if (next_mem->obsolete) {
+                       mem_cgroup_put(next_mem);
+                       cgroup_lock();
+                       next_mem = mem_cgroup_get_first_node(root_mem);
+                       cgroup_unlock();
+                       continue;
+               }
+               ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
+               if (mem_cgroup_check_under_limit(root_mem))
+                       return 0;
+               cgroup_lock();
+               next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
+               cgroup_unlock();
+       }
+       return ret;
+}
+
+bool mem_cgroup_oom_called(struct task_struct *task)
+{
+       bool ret = false;
+       struct mem_cgroup *mem;
+       struct mm_struct *mm;
+
+       rcu_read_lock();
+       mm = task->mm;
+       if (!mm)
+               mm = &init_mm;
+       mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+       if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
+               ret = true;
+       rcu_read_unlock();
+       return ret;
+}
 /*
  * Unlike exported interface, "oom" parameter is added. if oom==true,
  * oom-killer can be invoked.
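
The walk that mem_cgroup_get_next_node() performs above is easier to see on a
toy tree. Below is a user-space sketch of the same order: descend to the first
child, else take the next sibling, else climb until a sibling is found, and
wrap back to the root once the subtree is exhausted. struct node, next_node()
and the sample tree are invented stand-ins for struct cgroup's children/sibling
lists, so treat this as an illustration only; it also shows why caching
last_scanned_child lets successive reclaim passes round-robin over the children
instead of always starting at the first one.

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;
	struct node *first_child;
	struct node *next_sibling;
};

/* Next node in depth-first order, wrapping back to @root. */
static struct node *next_node(struct node *curr, struct node *root)
{
	if (curr->first_child)			/* walk down to children */
		return curr->first_child;

	while (curr != root) {
		if (curr->next_sibling)		/* go to the next sibling */
			return curr->next_sibling;
		curr = curr->parent;		/* go up and try the parent's sibling */
	}
	return root;				/* one full round done */
}

int main(void)
{
	struct node root = { "root", NULL, NULL, NULL };
	struct node a    = { "A",  &root, NULL, NULL };
	struct node b    = { "B",  &root, NULL, NULL };
	struct node a1   = { "A1", &a,    NULL, NULL };
	struct node *n = &root;
	int i;

	root.first_child = &a;
	a.next_sibling = &b;
	a.first_child = &a1;

	for (i = 0; i < 6; i++) {
		n = next_node(n, &root);
		printf("%s\n", n->name);	/* A A1 B root A A1 */
	}
	return 0;
}
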
@@ -469,8 +651,16 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        gfp_t gfp_mask, struct mem_cgroup **memcg,
                        bool oom)
 {
-       struct mem_cgroup *mem;
+       struct mem_cgroup *mem, *mem_over_limit;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+       struct res_counter *fail_res;
+
+       if (unlikely(test_thread_flag(TIF_MEMDIE))) {
+               /* Don't account this! */
+               *memcg = NULL;
+               return 0;
+       }
+
        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
@@ -499,22 +689,29 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                int ret;
                bool noswap = false;
 
-               ret = res_counter_charge(&mem->res, PAGE_SIZE);
+               ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
                if (likely(!ret)) {
                        if (!do_swap_account)
                                break;
-                       ret = res_counter_charge(&mem->memsw, PAGE_SIZE);
+                       ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
+                                                       &fail_res);
                        if (likely(!ret))
                                break;
                        /* mem+swap counter fails */
                        res_counter_uncharge(&mem->res, PAGE_SIZE);
                        noswap = true;
-               }
+                       mem_over_limit = mem_cgroup_from_res_counter(fail_res,
+                                                                       memsw);
+               } else
+                       /* mem counter fails */
+                       mem_over_limit = mem_cgroup_from_res_counter(fail_res,
+                                                                       res);
+
                if (!(gfp_mask & __GFP_WAIT))
                        goto nomem;
 
-               if (try_to_free_mem_cgroup_pages(mem, gfp_mask, noswap))
-                       continue;
+               ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
+                                                       noswap);
 
                /*
                 * try_to_free_mem_cgroup_pages() might not give us a full
@@ -524,16 +721,14 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                 * current usage of the cgroup before giving up
                 *
                 */
-               if (!do_swap_account &&
-                       res_counter_check_under_limit(&mem->res))
-                       continue;
-               if (do_swap_account &&
-                       res_counter_check_under_limit(&mem->memsw))
+               if (mem_cgroup_check_under_limit(mem_over_limit))
                        continue;
 
                if (!nr_retries--) {
-                       if (oom)
-                               mem_cgroup_out_of_memory(mem, gfp_mask);
+                       if (oom) {
+                               mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
+                               mem_over_limit->last_oom_jiffies = jiffies;
+                       }
                        goto nomem;
                }
        }
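
mem_cgroup_from_res_counter(), used above to map the fail_res pointer filled in
by res_counter_charge() back to the mem_cgroup that is actually over its limit,
is the usual container_of()/offsetof() trick. A minimal user-space illustration
follows; struct box, box_from_counter() and the field names are invented for the
example and are not kernel API.

#include <stddef.h>
#include <stdio.h>

struct counter { long usage, limit; };

struct box {
	int id;
	struct counter res;
	struct counter memsw;
};

/* Recover the structure that embeds @ptr as its @member field. */
#define box_from_counter(ptr, member) \
	((struct box *)((char *)(ptr) - offsetof(struct box, member)))

int main(void)
{
	struct box b = { .id = 42 };
	struct counter *failed = &b.memsw;	/* e.g. the counter that failed */

	printf("owner id = %d\n", box_from_counter(failed, memsw)->id);	/* 42 */
	return 0;
}
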
@@ -670,7 +865,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
 
 
        ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
-       if (ret)
+       if (ret || !parent)
                return ret;
 
        if (!get_page_unless_zero(page))
@@ -721,7 +916,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 
        mem = memcg;
        ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
-       if (ret)
+       if (ret || !mem)
                return ret;
 
        __mem_cgroup_commit_charge(mem, pc, ctype);
@@ -731,7 +926,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 int mem_cgroup_newpage_charge(struct page *page,
                              struct mm_struct *mm, gfp_t gfp_mask)
 {
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
        if (PageCompound(page))
                return 0;
@@ -753,7 +948,7 @@ int mem_cgroup_newpage_charge(struct page *page,
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
        if (PageCompound(page))
                return 0;
@@ -799,7 +994,7 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
        struct mem_cgroup *mem;
        swp_entry_t     ent;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
 
        if (!do_swap_account)
@@ -833,7 +1028,7 @@ int mem_cgroup_cache_charge_swapin(struct page *page,
 {
        int ret = 0;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
        if (unlikely(!mm))
                mm = &init_mm;
@@ -880,7 +1075,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 {
        struct page_cgroup *pc;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
        if (!ptr)
                return;
@@ -909,7 +1104,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 {
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
        if (!mem)
                return;
@@ -930,7 +1125,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        struct mem_cgroup *mem = NULL;
        struct mem_cgroup_per_zone *mz;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return NULL;
 
        if (PageSwapCache(page))
@@ -976,7 +1171,9 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        mz = page_cgroup_zoneinfo(pc);
        unlock_page_cgroup(pc);
 
-       css_put(&mem->css);
+       /* at swapout, this memcg is still accessed to record the owner in swap */
+       if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+               css_put(&mem->css);
 
        return mem;
 
@@ -1017,6 +1214,8 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
                swap_cgroup_record(ent, memcg);
                mem_cgroup_get(memcg);
        }
+       if (memcg)
+               css_put(&memcg->css);
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
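
The two hunks above are the swapout refcount fix itself: on the
MEM_CGROUP_CHARGE_TYPE_SWAPOUT path, __mem_cgroup_uncharge_common() now keeps
its css reference so the memcg cannot go away before
mem_cgroup_uncharge_swapcache() has recorded it for the swap entry, and that
function drops the reference once the record is made. A simplified user-space
sketch of the hand-off; struct memcg and the helpers below are stand-ins, not
the kernel's types.

#include <stdio.h>

struct memcg { int refcnt; };

static void css_get(struct memcg *m) { m->refcnt++; }
static void css_put(struct memcg *m) { m->refcnt--; }

/* Uncharge the page; on the swap-out path, hand one reference to the caller. */
static struct memcg *uncharge_common(struct memcg *m, int swapout)
{
	if (!swapout)
		css_put(m);	/* normal uncharge: drop the reference here */
	return m;		/* swap-out: the caller still owns one reference */
}

static void uncharge_swapcache(struct memcg *m)
{
	/* ... record m for the swap entry (swap_cgroup_record) ... */
	css_put(m);		/* then drop the reference kept above */
}

int main(void)
{
	struct memcg m = { 0 };

	css_get(&m);				/* reference taken at charge time */
	uncharge_swapcache(uncharge_common(&m, 1));
	printf("refcnt = %d\n", m.refcnt);	/* back to 0: no leak, no early free */
	return 0;
}
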
@@ -1049,7 +1248,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
        struct mem_cgroup *mem = NULL;
        int ret = 0;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
 
        pc = lookup_page_cgroup(page);
@@ -1061,7 +1260,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
        unlock_page_cgroup(pc);
 
        if (mem) {
-               ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
+               ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
                css_put(&mem->css);
        }
        *ptr = mem;
@@ -1131,7 +1330,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
        int progress = 0;
        int retry = MEM_CGROUP_RECLAIM_RETRIES;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
        if (!mm)
                return 0;
@@ -1147,7 +1346,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 
        do {
                progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
-               progress += res_counter_check_under_limit(&mem->res);
+               progress += mem_cgroup_check_under_limit(mem);
        } while (!progress && --retry);
 
        css_put(&mem->css);
@@ -1191,7 +1390,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                        break;
 
                progress = try_to_free_mem_cgroup_pages(memcg,
-                               GFP_HIGHUSER_MOVABLE, false);
+                               GFP_KERNEL, false);
                if (!progress)                  retry_count--;
        }
        return ret;
@@ -1231,7 +1430,7 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                        break;
 
                oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-               try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
+               try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
                curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
                if (curusage >= oldusage)
                        retry_count--;
@@ -1277,7 +1476,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
                }
                spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-               ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
+               ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
                if (ret == -ENOMEM)
                        break;
 
@@ -1363,7 +1562,7 @@ try_to_free:
                        goto out;
                }
                progress = try_to_free_mem_cgroup_pages(mem,
-                                                 GFP_HIGHUSER_MOVABLE, false);
+                                                 GFP_KERNEL, false);
                if (!progress) {
                        nr_retries--;
                        /* maybe some writeback is necessary */
@@ -1385,6 +1584,44 @@ int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
 }
 
 
+static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
+{
+       return mem_cgroup_from_cont(cont)->use_hierarchy;
+}
+
+static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
+                                       u64 val)
+{
+       int retval = 0;
+       struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+       struct cgroup *parent = cont->parent;
+       struct mem_cgroup *parent_mem = NULL;
+
+       if (parent)
+               parent_mem = mem_cgroup_from_cont(parent);
+
+       cgroup_lock();
+       /*
+        * If the parent's use_hierarchy is set, we can't make any modifications
+        * in the child subtrees. If it is unset, then the change can
+        * occur, provided the current cgroup has no children.
+        *
+        * For the root cgroup, parent_mem is NULL, we allow value to be
+        * set if there are no children.
+        */
+       if ((!parent_mem || !parent_mem->use_hierarchy) &&
+                               (val == 1 || val == 0)) {
+               if (list_empty(&cont->children))
+                       mem->use_hierarchy = val;
+               else
+                       retval = -EBUSY;
+       } else
+               retval = -EINVAL;
+       cgroup_unlock();
+
+       return retval;
+}
+
 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 {
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
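
The policy mem_cgroup_hierarchy_write() enforces above comes down to two
checks: the parent must not already impose hierarchical accounting, and the
cgroup must not have children yet (the real handler additionally rejects values
other than 0 and 1). A small sketch of that rule; struct group and
set_use_hierarchy() are illustrative stand-ins.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct group {
	bool use_hierarchy;
	struct group *parent;
	int nr_children;
};

static int set_use_hierarchy(struct group *g, bool val)
{
	if (g->parent && g->parent->use_hierarchy)
		return -1;	/* -EINVAL: parent already dictates hierarchy */
	if (g->nr_children)
		return -2;	/* -EBUSY: children exist, too late to change */
	g->use_hierarchy = val;
	return 0;
}

int main(void)
{
	struct group root  = { false, NULL, 1 };	/* already has one child */
	struct group child = { false, &root, 0 };

	printf("%d\n", set_use_hierarchy(&child, true));	/* 0: allowed */
	printf("%d\n", set_use_hierarchy(&root, true));		/* -2: busy   */
	return 0;
}
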
@@ -1548,6 +1785,11 @@ static struct cftype mem_cgroup_files[] = {
                .name = "force_empty",
                .trigger = mem_cgroup_force_empty_write,
        },
+       {
+               .name = "use_hierarchy",
+               .write_u64 = mem_cgroup_hierarchy_write,
+               .read_u64 = mem_cgroup_hierarchy_read,
+       },
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -1697,7 +1939,7 @@ static void mem_cgroup_put(struct mem_cgroup *mem)
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 static void __init enable_swap_cgroup(void)
 {
-       if (!mem_cgroup_subsys.disabled && really_do_swap_account)
+       if (!mem_cgroup_disabled() && really_do_swap_account)
                do_swap_account = 1;
 }
 #else
@@ -1709,22 +1951,34 @@ static void __init enable_swap_cgroup(void)
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
-       struct mem_cgroup *mem;
+       struct mem_cgroup *mem, *parent;
        int node;
 
        mem = mem_cgroup_alloc();
        if (!mem)
                return ERR_PTR(-ENOMEM);
 
-       res_counter_init(&mem->res);
-       res_counter_init(&mem->memsw);
-
        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;
        /* root ? */
-       if (cont->parent == NULL)
+       if (cont->parent == NULL) {
                enable_swap_cgroup();
+               parent = NULL;
+       } else {
+               parent = mem_cgroup_from_cont(cont->parent);
+               mem->use_hierarchy = parent->use_hierarchy;
+       }
+
+       if (parent && parent->use_hierarchy) {
+               res_counter_init(&mem->res, &parent->res);
+               res_counter_init(&mem->memsw, &parent->memsw);
+       } else {
+               res_counter_init(&mem->res, NULL);
+               res_counter_init(&mem->memsw, NULL);
+       }
+
+       mem->last_scanned_child = NULL;
 
        return &mem->css;
 free_out:
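
When use_hierarchy is set, mem_cgroup_create() above chains each new group's
counters to the parent's, so a charge against a child also counts against every
ancestor, and the first counter to hit its limit is the one reported back; that
is where the fail_res consumed by __mem_cgroup_try_charge() comes from. A
user-space sketch of the propagation; struct counter and charge() are
simplified stand-ins for the res_counter API.

#include <stddef.h>
#include <stdio.h>

struct counter {
	long usage, limit;
	struct counter *parent;
};

/* Charge @sz up the chain; on failure report the full counter in @fail. */
static int charge(struct counter *c, long sz, struct counter **fail)
{
	struct counter *p, *q;

	for (p = c; p; p = p->parent) {
		if (p->usage + sz > p->limit) {
			for (q = c; q != p; q = q->parent)
				q->usage -= sz;	/* roll back partial charges */
			*fail = p;
			return -1;
		}
		p->usage += sz;
	}
	return 0;
}

int main(void)
{
	struct counter root  = { 0, 8,   NULL };
	struct counter child = { 0, 100, &root };
	struct counter *fail = NULL;

	charge(&child, 4, &fail);
	charge(&child, 4, &fail);
	if (charge(&child, 4, &fail))	/* root (limit 8) is the bottleneck */
		printf("hit the limit of %ld at an ancestor\n", fail->limit);
	return 0;
}
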
@@ -1766,25 +2020,10 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *old_cont,
                                struct task_struct *p)
 {
-       struct mm_struct *mm;
-       struct mem_cgroup *mem, *old_mem;
-
-       mm = get_task_mm(p);
-       if (mm == NULL)
-               return;
-
-       mem = mem_cgroup_from_cont(cont);
-       old_mem = mem_cgroup_from_cont(old_cont);
-
        /*
-        * Only thread group leaders are allowed to migrate, the mm_struct is
-        * in effect owned by the leader
+        * FIXME: It's better to move charges of this process from old
+        * memcg to new memcg. But it's just on TODO-List now.
         */
-       if (!thread_group_leader(p))
-               goto out;
-
-out:
-       mmput(mm);
 }
 
 struct cgroup_subsys mem_cgroup_subsys = {