mm: don't call mark_page_accessed() in do_swap_page()
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b311f19..8e4be9c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -154,7 +154,7 @@ struct mem_cgroup {
 
        /*
         * While reclaiming in a hierarchy, we cache the last child we
-        * reclaimed from. Protected by cgroup_lock()
+        * reclaimed from. Protected by hierarchy_mutex
         */
        struct mem_cgroup *last_scanned_child;
        /*
@@ -202,6 +202,7 @@ pcg_default_flags[NR_CHARGE_TYPE] = {
 
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
+static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
                                         struct page_cgroup *pc,
@@ -331,8 +332,12 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
                return;
        pc = lookup_page_cgroup(page);
        /* can happen while we handle swapcache. */
-       if (list_empty(&pc->lru))
+       if (list_empty(&pc->lru) || !pc->mem_cgroup)
                return;
+       /*
+        * We don't check PCG_USED bit. It's cleared when the "page" is finally
+        * removed from global LRU.
+        */
        mz = page_cgroup_zoneinfo(pc);
        mem = pc->mem_cgroup;
        MEM_CGROUP_ZSTAT(mz, lru) -= 1;
@@ -354,6 +359,10 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
                return;
 
        pc = lookup_page_cgroup(page);
+       /*
+        * The Used bit is set without atomic ops, but only after smp_wmb().
+        * A matching smp_rmb() is needed here to make pc->mem_cgroup visible.
+        */
        smp_rmb();
        /* unused page is not rotated. */
        if (!PageCgroupUsed(pc))
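
A minimal sketch of the publish/read pairing these barriers implement,
simplified from the charge and LRU paths in this file (the writer is
__mem_cgroup_commit_charge(); error handling omitted):

	/* writer side: publish the pointer before the flag that guards it */
	pc->mem_cgroup = mem;
	smp_wmb();
	SetPageCgroupUsed(pc);

	/* reader side, as in the LRU handlers above */
	smp_rmb();			/* pairs with the smp_wmb() above */
	if (!PageCgroupUsed(pc))
		return;			/* not (or no longer) charged */
	mz = page_cgroup_zoneinfo(pc);	/* pc->mem_cgroup is now visible */
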
@@ -370,7 +379,10 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
        if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
-       /* barrier to sync with "charge" */
+       /*
+        * The Used bit is set without atomic ops, but only after smp_wmb().
+        * A matching smp_rmb() is needed here to make pc->mem_cgroup visible.
+        */
        smp_rmb();
        if (!PageCgroupUsed(pc))
                return;
@@ -379,16 +391,44 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
        MEM_CGROUP_ZSTAT(mz, lru) += 1;
        list_add(&pc->lru, &mz->lists[lru]);
 }
+
 /*
- * To add swapcache into LRU. Be careful to all this function.
- * zone->lru_lock shouldn't be held and irq must not be disabled.
+ * At handling SwapCache, pc->mem_cgroup may be changed while it's linked to
+ * lru because the page may be reused after it's fully uncharged (because of
+ * SwapCache behavior). To handle that, unlink page_cgroup from LRU when we
+ * charge it again. This function is only used to charge SwapCache. It's done
+ * under lock_page and expected that zone->lru_lock is never held.
  */
-static void mem_cgroup_lru_fixup(struct page *page)
+static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
 {
-       if (!isolate_lru_page(page))
-               putback_lru_page(page);
+       unsigned long flags;
+       struct zone *zone = page_zone(page);
+       struct page_cgroup *pc = lookup_page_cgroup(page);
+
+       spin_lock_irqsave(&zone->lru_lock, flags);
+       /*
+        * Forget old LRU when this page_cgroup is *not* used. This Used bit
+        * is guarded by lock_page() because the page is SwapCache.
+        */
+       if (!PageCgroupUsed(pc))
+               mem_cgroup_del_lru_list(page, page_lru(page));
+       spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
+static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
+{
+       unsigned long flags;
+       struct zone *zone = page_zone(page);
+       struct page_cgroup *pc = lookup_page_cgroup(page);
+
+       spin_lock_irqsave(&zone->lru_lock, flags);
+       /* link when the page is linked to LRU but page_cgroup isn't */
+       if (PageLRU(page) && list_empty(&pc->lru))
+               mem_cgroup_add_lru_list(page, page_lru(page));
+       spin_unlock_irqrestore(&zone->lru_lock, flags);
 }
 
+
 void mem_cgroup_move_lists(struct page *page,
                           enum lru_list from, enum lru_list to)
 {
@@ -527,6 +567,14 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
                return NULL;
 
        pc = lookup_page_cgroup(page);
+       /*
+        * The Used bit is set without atomic ops, but only after smp_wmb().
+        * A matching smp_rmb() is needed here to make pc->mem_cgroup visible.
+        */
+       smp_rmb();
+       if (!PageCgroupUsed(pc))
+               return NULL;
+
        mz = page_cgroup_zoneinfo(pc);
        if (!mz)
                return NULL;
@@ -583,10 +631,10 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 
 /*
  * This routine finds the DFS walk successor. This routine should be
- * called with cgroup_mutex held
+ * called with hierarchy_mutex held
  */
 static struct mem_cgroup *
-mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+__mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
 {
        struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
 
@@ -597,19 +645,16 @@ mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
                /*
                 * Walk down to children
                 */
-               mem_cgroup_put(curr);
                cgroup = list_entry(curr_cgroup->children.next,
                                                struct cgroup, sibling);
                curr = mem_cgroup_from_cont(cgroup);
-               mem_cgroup_get(curr);
                goto done;
        }
 
 visit_parent:
        if (curr_cgroup == root_cgroup) {
-               mem_cgroup_put(curr);
-               curr = root_mem;
-               mem_cgroup_get(curr);
+               /* caller handles NULL case */
+               curr = NULL;
                goto done;
        }
 
@@ -617,11 +662,9 @@ visit_parent:
         * Goto next sibling
         */
        if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
-               mem_cgroup_put(curr);
                cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
                                                sibling);
                curr = mem_cgroup_from_cont(cgroup);
-               mem_cgroup_get(curr);
                goto done;
        }
 
@@ -632,7 +675,6 @@ visit_parent:
        goto visit_parent;
 
 done:
-       root_mem->last_scanned_child = curr;
        return curr;
 }
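
In outline, this successor walk is an iterative DFS over the cgroup tree:
descend to the first child when one exists, otherwise move to the next
sibling, otherwise climb to the parent and retry, yielding NULL once the
walk returns to the root. A simplified sketch (obsolete-group handling
omitted; first_child()/next_sibling() are hypothetical shorthands for the
list_entry() calls above):

	if (!list_empty(&curr_cgroup->children))
		return first_child(curr_cgroup);		/* walk down */
	while (curr_cgroup != root_cgroup) {
		if (curr_cgroup->sibling.next != &curr_cgroup->parent->children)
			return next_sibling(curr_cgroup);	/* walk right */
		curr_cgroup = curr_cgroup->parent;		/* walk up */
	}
	return NULL;	/* wrapped around: caller falls back to root_mem */
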
 
@@ -642,40 +684,46 @@ done:
  * that to reclaim free pages from.
  */
 static struct mem_cgroup *
-mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
+mem_cgroup_get_next_node(struct mem_cgroup *root_mem)
 {
        struct cgroup *cgroup;
-       struct mem_cgroup *ret;
+       struct mem_cgroup *orig, *next;
        bool obsolete;
 
-       obsolete = mem_cgroup_is_obsolete(root_mem->last_scanned_child);
-
        /*
         * Scan all children under the mem_cgroup mem
         */
-       cgroup_lock();
+       mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+
+       orig = root_mem->last_scanned_child;
+       obsolete = mem_cgroup_is_obsolete(orig);
+
        if (list_empty(&root_mem->css.cgroup->children)) {
-               ret = root_mem;
+               /*
+                * root_mem might have had children earlier, and last_scanned_child
+                * may still point to one of them; that reference is dropped below.
+                */
+               if (orig)
+                       VM_BUG_ON(!obsolete);
+               next = NULL;
                goto done;
        }
 
-       if (!root_mem->last_scanned_child || obsolete) {
-
-               if (obsolete && root_mem->last_scanned_child)
-                       mem_cgroup_put(root_mem->last_scanned_child);
-
+       if (!orig || obsolete) {
                cgroup = list_first_entry(&root_mem->css.cgroup->children,
                                struct cgroup, sibling);
-               ret = mem_cgroup_from_cont(cgroup);
-               mem_cgroup_get(ret);
+               next = mem_cgroup_from_cont(cgroup);
        } else
-               ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
-                                               root_mem);
+               next = __mem_cgroup_get_next_node(orig, root_mem);
 
 done:
-       root_mem->last_scanned_child = ret;
-       cgroup_unlock();
-       return ret;
+       if (next)
+               mem_cgroup_get(next);
+       root_mem->last_scanned_child = next;
+       if (orig)
+               mem_cgroup_put(orig);
+       mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
+       return (next) ? next : root_mem;
 }
 
 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
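
The rewrite above also repairs the reference counting around
last_scanned_child: the successor is pinned before the previous child is
released, all under hierarchy_mutex, so the cached pointer always owns
exactly one reference. In order:

	/*
	 * next = successor of orig (may be NULL);
	 * if (next) mem_cgroup_get(next);	pin the new child first
	 * root_mem->last_scanned_child = next;
	 * if (orig) mem_cgroup_put(orig);	only then drop the old one
	 */
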
@@ -726,30 +774,25 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
         * but there might be left over accounting, even after children
         * have left.
         */
-       ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
+       ret += try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
                                           get_swappiness(root_mem));
        if (mem_cgroup_check_under_limit(root_mem))
-               return 0;
+               return 1;       /* indicate reclaim has succeeded */
        if (!root_mem->use_hierarchy)
                return ret;
 
-       next_mem = mem_cgroup_get_first_node(root_mem);
+       next_mem = mem_cgroup_get_next_node(root_mem);
 
        while (next_mem != root_mem) {
                if (mem_cgroup_is_obsolete(next_mem)) {
-                       mem_cgroup_put(next_mem);
-                       cgroup_lock();
-                       next_mem = mem_cgroup_get_first_node(root_mem);
-                       cgroup_unlock();
+                       next_mem = mem_cgroup_get_next_node(root_mem);
                        continue;
                }
-               ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
+               ret += try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
                                                   get_swappiness(next_mem));
                if (mem_cgroup_check_under_limit(root_mem))
-                       return 0;
-               cgroup_lock();
-               next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
-               cgroup_unlock();
+                       return 1;       /* indicate reclaim has succeeded */
+               next_mem = mem_cgroup_get_next_node(root_mem);
        }
        return ret;
 }
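
Note the changed return convention: the function now returns 1 as soon as
usage drops back under the limit, and otherwise accumulates the progress
reported by try_to_free_mem_cgroup_pages() in ret. The charge path (next
hunk) relies on a nonzero result to retry immediately:

	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask, noswap);
	if (ret)
		continue;	/* reclaim made progress: retry the charge */
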
@@ -833,6 +876,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 
                ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
                                                        noswap);
+               if (ret)
+                       continue;
 
                /*
                 * try_to_free_mem_cgroup_pages() might not give us a full
@@ -861,6 +906,23 @@ nomem:
        return -ENOMEM;
 }
 
+static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
+{
+       struct mem_cgroup *mem;
+       swp_entry_t ent;
+
+       if (!PageSwapCache(page))
+               return NULL;
+
+       ent.val = page_private(page);
+       mem = lookup_swap_cgroup(ent);
+       if (!mem)
+               return NULL;
+       if (!css_tryget(&mem->css))
+               return NULL;
+       return mem;
+}
+
 /*
  * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be
  * USED state. If already USED, uncharge and return.
@@ -932,14 +994,15 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
        if (pc->mem_cgroup != from)
                goto out;
 
-       css_put(&from->css);
        res_counter_uncharge(&from->res, PAGE_SIZE);
        mem_cgroup_charge_statistics(from, pc, false);
        if (do_swap_account)
                res_counter_uncharge(&from->memsw, PAGE_SIZE);
+       css_put(&from->css);
+
+       css_get(&to->css);
        pc->mem_cgroup = to;
        mem_cgroup_charge_statistics(to, pc, true);
-       css_get(&to->css);
        ret = 0;
 out:
        unlock_page_cgroup(pc);
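
The reordering in mem_cgroup_move_account() is deliberate: "from" keeps its
css reference until after its counters are uncharged, and "to" is pinned
before pc->mem_cgroup is made to point at it, so neither group can go away
while its res_counter is being touched:

	/*
	 * res_counter_uncharge(&from->res, ...);	"from" still pinned
	 * ...
	 * css_put(&from->css);				drop only after uncharge
	 * css_get(&to->css);				pin before publishing
	 * pc->mem_cgroup = to;
	 */
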
@@ -972,8 +1035,10 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
        if (ret || !parent)
                return ret;
 
-       if (!get_page_unless_zero(page))
-               return -EBUSY;
+       if (!get_page_unless_zero(page)) {
+               ret = -EBUSY;
+               goto uncharge;
+       }
 
        ret = isolate_lru_page(page);
 
@@ -982,19 +1047,23 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
 
        ret = mem_cgroup_move_account(pc, child, parent);
 
-       /* drop extra refcnt by try_charge() (move_account increment one) */
-       css_put(&parent->css);
        putback_lru_page(page);
        if (!ret) {
                put_page(page);
+               /* drop extra refcnt by try_charge() */
+               css_put(&parent->css);
                return 0;
        }
-       /* uncharge if move fails */
+
 cancel:
+       put_page(page);
+uncharge:
+       /* drop extra refcnt by try_charge() */
+       css_put(&parent->css);
+       /* uncharge if move fails */
        res_counter_uncharge(&parent->res, PAGE_SIZE);
        if (do_swap_account)
                res_counter_uncharge(&parent->memsw, PAGE_SIZE);
-       put_page(page);
        return ret;
 }
 
@@ -1052,6 +1121,9 @@ int mem_cgroup_newpage_charge(struct page *page,
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
+       struct mem_cgroup *mem = NULL;
+       int ret;
+
        if (mem_cgroup_disabled())
                return 0;
        if (PageCompound(page))
@@ -1064,6 +1136,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
         * For GFP_NOWAIT case, the page may be pre-charged before calling
         * add_to_page_cache(). (See shmem.c) check it here and avoid to call
         * charge twice. (It works but has to pay a bit larger cost.)
+        * And when the page is SwapCache, the swap accounting information
+        * must be taken into account. This runs under lock_page() now.
         */
        if (!(gfp_mask & __GFP_WAIT)) {
                struct page_cgroup *pc;
@@ -1080,15 +1154,40 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                unlock_page_cgroup(pc);
        }
 
-       if (unlikely(!mm))
+       if (do_swap_account && PageSwapCache(page)) {
+               mem = try_get_mem_cgroup_from_swapcache(page);
+               if (mem)
+                       mm = NULL;
+               else
+                       mem = NULL;
+               /* SwapCache may be still linked to LRU now. */
+               mem_cgroup_lru_del_before_commit_swapcache(page);
+       }
+
+       if (unlikely(!mm && !mem))
                mm = &init_mm;
 
        if (page_is_file_cache(page))
                return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
-       else
-               return mem_cgroup_charge_common(page, mm, gfp_mask,
-                               MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
+
+       ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+                               MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
+       if (mem)
+               css_put(&mem->css);
+       if (PageSwapCache(page))
+               mem_cgroup_lru_add_after_commit_swapcache(page);
+
+       if (do_swap_account && !ret && PageSwapCache(page)) {
+               swp_entry_t ent = {.val = page_private(page)};
+               /* avoid double counting */
+               mem = swap_cgroup_record(ent, NULL);
+               if (mem) {
+                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+                       mem_cgroup_put(mem);
+               }
+       }
+       return ret;
 }
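
For a SwapCache page, the rewritten function picks the charge target in
priority order; a summary of the branches above:

	/*
	 * 1. the memcg recorded in swap_cgroup at swap-out (mem != NULL, mm = NULL)
	 * 2. the caller's mm                               (mem == NULL, mm != NULL)
	 * 3. init_mm as a fallback                         (!mm && !mem)
	 *
	 * On success, the memsw charge recorded at swap-out is released via
	 * swap_cgroup_record(ent, NULL) so the page is not counted twice.
	 */
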
 
 /*
@@ -1102,7 +1201,6 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                                 gfp_t mask, struct mem_cgroup **ptr)
 {
        struct mem_cgroup *mem;
-       swp_entry_t     ent;
        int ret;
 
        if (mem_cgroup_disabled())
@@ -1110,7 +1208,6 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 
        if (!do_swap_account)
                goto charge_cur_mm;
-
        /*
         * A racing thread's fault, or swapoff, may have already updated
         * the pte, and even removed page from swap cache: return success
@@ -1118,14 +1215,9 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
         */
        if (!PageSwapCache(page))
                return 0;
-
-       ent.val = page_private(page);
-
-       mem = lookup_swap_cgroup(ent);
+       mem = try_get_mem_cgroup_from_swapcache(page);
        if (!mem)
                goto charge_cur_mm;
-       if (!css_tryget(&mem->css))
-               goto charge_cur_mm;
        *ptr = mem;
        ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
        /* drop extra refcnt from tryget */
@@ -1137,61 +1229,6 @@ charge_cur_mm:
        return __mem_cgroup_try_charge(mm, mask, ptr, true);
 }
 
-#ifdef CONFIG_SWAP
-
-int mem_cgroup_cache_charge_swapin(struct page *page,
-                       struct mm_struct *mm, gfp_t mask, bool locked)
-{
-       int ret = 0;
-
-       if (mem_cgroup_disabled())
-               return 0;
-       if (unlikely(!mm))
-               mm = &init_mm;
-       if (!locked)
-               lock_page(page);
-       /*
-        * If not locked, the page can be dropped from SwapCache until
-        * we reach here.
-        */
-       if (PageSwapCache(page)) {
-               struct mem_cgroup *mem = NULL;
-               swp_entry_t ent;
-
-               ent.val = page_private(page);
-               if (do_swap_account) {
-                       mem = lookup_swap_cgroup(ent);
-                       if (mem) {
-                               if (css_tryget(&mem->css))
-                                       mm = NULL; /* charge to recorded */
-                               else
-                                       mem = NULL; /* charge to current */
-                       }
-               }
-               ret = mem_cgroup_charge_common(page, mm, mask,
-                               MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
-               /* drop extra refcnt from tryget */
-               if (mem)
-                       css_put(&mem->css);
-
-               if (!ret && do_swap_account) {
-                       /* avoid double counting */
-                       mem = swap_cgroup_record(ent, NULL);
-                       if (mem) {
-                               res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-                               mem_cgroup_put(mem);
-                       }
-               }
-       }
-       if (!locked)
-               unlock_page(page);
-       /* add this page(page_cgroup) to the LRU we want. */
-       mem_cgroup_lru_fixup(page);
-
-       return ret;
-}
-#endif
-
 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 {
        struct page_cgroup *pc;
@@ -1201,7 +1238,9 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
        if (!ptr)
                return;
        pc = lookup_page_cgroup(page);
+       mem_cgroup_lru_del_before_commit_swapcache(page);
        __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+       mem_cgroup_lru_add_after_commit_swapcache(page);
        /*
         * Now swap is on-memory. This means this page may be
         * counted both as mem and swap....double count.
@@ -1220,7 +1259,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 
        }
-       /* add this page(page_cgroup) to the LRU we want. */
-       mem_cgroup_lru_fixup(page);
 }
 
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
@@ -1288,6 +1327,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 
        mem_cgroup_charge_statistics(mem, pc, false);
        ClearPageCgroupUsed(pc);
+       /*
+        * pc->mem_cgroup is not cleared here. It will be accessed when the
+        * page is finally removed from the LRU. This is safe because an
+        * uncharged page is not expected to be reused (it is freed soon).
+        * The exception is SwapCache, which is handled by the special
+        * functions above.
+        */
 
        mz = page_cgroup_zoneinfo(pc);
        unlock_page_cgroup(pc);
@@ -1445,18 +1490,20 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
  * This is typically used for page reclaiming for shmem for reducing side
  * effect of page allocation from shmem, which is used by some mem_cgroup.
  */
-int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_shrink_usage(struct page *page,
+                           struct mm_struct *mm,
+                           gfp_t gfp_mask)
 {
-       struct mem_cgroup *mem;
+       struct mem_cgroup *mem = NULL;
        int progress = 0;
        int retry = MEM_CGROUP_RECLAIM_RETRIES;
 
        if (mem_cgroup_disabled())
                return 0;
-       if (!mm)
-               return 0;
-
-       mem = try_get_mem_cgroup_from_mm(mm);
+       if (page)
+               mem = try_get_mem_cgroup_from_swapcache(page);
+       if (!mem && mm)
+               mem = try_get_mem_cgroup_from_mm(mm);
        if (unlikely(!mem))
                return 0;
 
@@ -1638,7 +1685,7 @@ move_account:
                /* This is for making all *used* pages to be on LRU. */
                lru_add_drain_all();
                ret = 0;
-               for_each_node_state(node, N_POSSIBLE) {
+               for_each_node_state(node, N_HIGH_MEMORY) {
                        for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
                                enum lru_list l;
                                for_each_lru(l) {
@@ -1946,6 +1993,7 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
        struct mem_cgroup *parent;
+
        if (val > 100)
                return -EINVAL;
 
@@ -1953,15 +2001,22 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
                return -EINVAL;
 
        parent = mem_cgroup_from_cont(cgrp->parent);
+
+       cgroup_lock();
+
        /* If under hierarchy, only empty-root can set this value */
        if ((parent->use_hierarchy) ||
-           (memcg->use_hierarchy && !list_empty(&cgrp->children)))
+           (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
+               cgroup_unlock();
                return -EINVAL;
+       }
 
        spin_lock(&memcg->reclaim_param_lock);
        memcg->swappiness = val;
        spin_unlock(&memcg->reclaim_param_lock);
 
+       cgroup_unlock();
+
        return 0;
 }
 
@@ -2139,10 +2194,23 @@ static void mem_cgroup_get(struct mem_cgroup *mem)
 
 static void mem_cgroup_put(struct mem_cgroup *mem)
 {
-       if (atomic_dec_and_test(&mem->refcnt))
+       if (atomic_dec_and_test(&mem->refcnt)) {
+               struct mem_cgroup *parent = parent_mem_cgroup(mem);
                __mem_cgroup_free(mem);
+               if (parent)
+                       mem_cgroup_put(parent);
+       }
 }
 
+/*
+ * Returns the parent mem_cgroup of "mem" in the memcg hierarchy, or NULL
+ * when "mem" has no parent (i.e. it is a root or hierarchy is not enabled).
+ */
+static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
+{
+       if (!mem->res.parent)
+               return NULL;
+       return mem_cgroup_from_res_counter(mem->res.parent, res);
+}
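
parent_mem_cgroup() recovers the parent through the res_counter hierarchy
rather than the cgroup tree, so it remains usable while the cgroup itself
is being torn down. mem_cgroup_from_res_counter() is, roughly, a
container_of() wrapper:

	/* roughly:
	 * #define mem_cgroup_from_res_counter(counter, member)	\
	 *	container_of(counter, struct mem_cgroup, member)
	 */
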
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 static void __init enable_swap_cgroup(void)
@@ -2156,7 +2224,7 @@ static void __init enable_swap_cgroup(void)
 }
 #endif
 
-static struct cgroup_subsys_state *
+static struct cgroup_subsys_state * __ref
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
        struct mem_cgroup *mem, *parent;
@@ -2181,6 +2249,13 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        if (parent && parent->use_hierarchy) {
                res_counter_init(&mem->res, &parent->res);
                res_counter_init(&mem->memsw, &parent->memsw);
+               /*
+                * We increment the refcnt of the parent to ensure that we can
+                * safely access it in res_counter_charge/uncharge.
+                * This refcnt will be decremented when this mem_cgroup is
+                * freed (see mem_cgroup_put()).
+                */
+               mem_cgroup_get(parent);
        } else {
                res_counter_init(&mem->res, NULL);
                res_counter_init(&mem->memsw, NULL);
@@ -2207,7 +2282,14 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
                                struct cgroup *cont)
 {
-       mem_cgroup_put(mem_cgroup_from_cont(cont));
+       struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *last_scanned_child = mem->last_scanned_child;
+
+       if (last_scanned_child) {
+               VM_BUG_ON(!mem_cgroup_is_obsolete(last_scanned_child));
+               mem_cgroup_put(last_scanned_child);
+       }
+       mem_cgroup_put(mem);
 }
 
 static int mem_cgroup_populate(struct cgroup_subsys *ss,