memory controller: soft limit interface
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f0dc076..4ad3e6b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -27,6 +27,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/limits.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 
 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
 #define MEM_CGROUP_RECLAIM_RETRIES     5
+struct mem_cgroup *root_mem_cgroup __read_mostly;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
-/* Turned on only when memory cgroup is enabled && really_do_swap_account = 0 */
+/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
 int do_swap_account __read_mostly;
 static int really_do_swap_account __initdata = 1; /* for remembering the boot option */
 #else
@@ -61,7 +63,8 @@ enum mem_cgroup_stat_index {
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
-       MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
+       MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
+       MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
 
@@ -95,6 +98,15 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
        return ret;
 }
 
+static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
+{
+       s64 ret;
+
+       ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
+       ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
+       return ret;
+}
+
 /*
  * per-zone information in memory controller.
  */
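
The new mem_cgroup_local_usage() helper reports only this group's own cache plus anon rss, built on the same per-CPU counters used throughout this file. A minimal sketch of the accumulation mem_cgroup_read_stat() is assumed to perform (the cpustat/count[] field names are taken from accesses elsewhere in this patch):

	/* Sketch only, assuming a per-CPU counter array as suggested by the
	 * cpustat[cpu].count[] accesses below; not the kernel's exact code. */
	static s64 read_stat_sketch(struct mem_cgroup_stat *stat,
				    enum mem_cgroup_stat_index idx)
	{
		s64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += stat->cpustat[cpu].count[idx];
		return sum;
	}
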
@@ -154,9 +166,9 @@ struct mem_cgroup {
 
        /*
        * While reclaiming in a hierarchy, we cache the last child we
-        * reclaimed from. Protected by hierarchy_mutex
+        * reclaimed from.
         */
-       struct mem_cgroup *last_scanned_child;
+       int last_scanned_child;
        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
@@ -166,6 +178,9 @@ struct mem_cgroup {
 
        unsigned int    swappiness;
 
+       /* set when res.limit == memsw.limit */
+       bool            memsw_is_minimum;
+
        /*
         * statistics. This must be placed at the end of memcg.
         */
@@ -178,6 +193,7 @@ enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
        MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
        MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
+       MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
        NR_CHARGE_TYPE,
 };
 
@@ -185,13 +201,8 @@ enum charge_type {
 #define PCGF_CACHE     (1UL << PCG_CACHE)
 #define PCGF_USED      (1UL << PCG_USED)
 #define PCGF_LOCK      (1UL << PCG_LOCK)
-static const unsigned long
-pcg_default_flags[NR_CHARGE_TYPE] = {
-       PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
-       PCGF_USED | PCGF_LOCK, /* Anon */
-       PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
-       0, /* FORCE */
-};
+/* Not used, but added here for completeness */
+#define PCGF_ACCT      (1UL << PCG_ACCT)
 
 /* for encoding cft->private value on file */
 #define _MEM                   (0)
@@ -202,6 +213,7 @@ pcg_default_flags[NR_CHARGE_TYPE] = {
 
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
+static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
                                         struct page_cgroup *pc,
@@ -246,7 +258,7 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
        return mem_cgroup_zoneinfo(mem, nid, zid);
 }
 
-static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
+static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
                                        enum lru_list idx)
 {
        int nid, zid;
@@ -285,6 +297,9 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
        struct mem_cgroup *mem = NULL;
+
+       if (!mm)
+               return NULL;
        /*
        * Because we have no locks, the mm's owner may be being moved to another
        * cgroup. We use css_tryget() here even if this looks
@@ -300,11 +315,44 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
        return mem;
 }
 
-static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
+/*
+ * Call the callback function against all cgroups under the hierarchy tree.
+ */
+static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
+                         int (*func)(struct mem_cgroup *, void *))
 {
-       if (!mem)
-               return true;
-       return css_is_removed(&mem->css);
+       int found, ret, nextid;
+       struct cgroup_subsys_state *css;
+       struct mem_cgroup *mem;
+
+       if (!root->use_hierarchy)
+               return (*func)(root, data);
+
+       nextid = 1;
+       do {
+               ret = 0;
+               mem = NULL;
+
+               rcu_read_lock();
+               css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
+                                  &found);
+               if (css && css_tryget(css))
+                       mem = container_of(css, struct mem_cgroup, css);
+               rcu_read_unlock();
+
+               if (mem) {
+                       ret = (*func)(mem, data);
+                       css_put(&mem->css);
+               }
+               nextid = found + 1;
+       } while (!ret && css);
+
+       return ret;
+}
+
+static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
+{
+       return (mem == root_mem_cgroup);
 }
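
mem_cgroup_walk_tree() replaces pointer-chasing under hierarchy_mutex with a css-ID walk via css_get_next(), taking and dropping a reference around each callback. A hedged sketch of a caller, modeled on mem_cgroup_count_children_cb() further down in this patch (subtree_usage_sketch is illustrative, not part of the patch):

	/* Sketch: sum local usage over a subtree with the new walker.
	 * A non-zero return from the callback stops the walk early. */
	static int local_usage_cb(struct mem_cgroup *mem, void *data)
	{
		s64 *total = data;

		*total += mem_cgroup_local_usage(&mem->stat);
		return 0;
	}

	static s64 subtree_usage_sketch(struct mem_cgroup *root)
	{
		s64 total = 0;

		mem_cgroup_walk_tree(root, &total, local_usage_cb);
		return total;
	}
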
 
 /*
@@ -324,22 +372,24 @@ static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
 {
        struct page_cgroup *pc;
-       struct mem_cgroup *mem;
        struct mem_cgroup_per_zone *mz;
 
        if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        /* can happen while we handle swapcache. */
-       if (list_empty(&pc->lru) || !pc->mem_cgroup)
+       if (!TestClearPageCgroupAcctLRU(pc))
                return;
+       VM_BUG_ON(!pc->mem_cgroup);
        /*
         * We don't check PCG_USED bit. It's cleared when the "page" is finally
         * removed from global LRU.
         */
        mz = page_cgroup_zoneinfo(pc);
-       mem = pc->mem_cgroup;
        MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+       if (mem_cgroup_is_root(pc->mem_cgroup))
+               return;
+       VM_BUG_ON(list_empty(&pc->lru));
        list_del_init(&pc->lru);
        return;
 }
@@ -363,8 +413,8 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
         * For making pc->mem_cgroup visible, insert smp_rmb() here.
         */
        smp_rmb();
-       /* unused page is not rotated. */
-       if (!PageCgroupUsed(pc))
+       /* unused or root page is not rotated. */
+       if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
                return;
        mz = page_cgroup_zoneinfo(pc);
        list_move(&pc->lru, &mz->lists[lru]);
@@ -378,6 +428,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
        if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
+       VM_BUG_ON(PageCgroupAcctLRU(pc));
        /*
         * Used bit is set without atomic ops but after smp_wmb().
         * For making pc->mem_cgroup visible, insert smp_rmb() here.
@@ -388,6 +439,9 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 
        mz = page_cgroup_zoneinfo(pc);
        MEM_CGROUP_ZSTAT(mz, lru) += 1;
+       SetPageCgroupAcctLRU(pc);
+       if (mem_cgroup_is_root(pc->mem_cgroup))
+               return;
        list_add(&pc->lru, &mz->lists[lru]);
 }
 
@@ -422,7 +476,7 @@ static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
 
        spin_lock_irqsave(&zone->lru_lock, flags);
        /* link when the page is linked to LRU but page_cgroup isn't */
-       if (PageLRU(page) && list_empty(&pc->lru))
+       if (PageLRU(page) && !PageCgroupAcctLRU(pc))
                mem_cgroup_add_lru_list(page, page_lru(page));
        spin_unlock_irqrestore(&zone->lru_lock, flags);
 }
@@ -440,31 +494,24 @@ void mem_cgroup_move_lists(struct page *page,
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 {
        int ret;
+       struct mem_cgroup *curr = NULL;
 
        task_lock(task);
-       ret = task->mm && mm_match_cgroup(task->mm, mem);
+       rcu_read_lock();
+       curr = try_get_mem_cgroup_from_mm(task->mm);
+       rcu_read_unlock();
        task_unlock(task);
+       if (!curr)
+               return 0;
+       if (curr->use_hierarchy)
+               ret = css_is_ancestor(&curr->css, &mem->css);
+       else
+               ret = (curr == mem);
+       css_put(&curr->css);
        return ret;
 }
 
 /*
- * Calculate mapped_ratio under memory controller. This will be used in
- * vmscan.c for deteremining we have to reclaim mapped pages.
- */
-int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
-{
-       long total, rss;
-
-       /*
-        * usage is recorded in bytes. But, here, we assume the number of
-        * physical pages can be represented by "long" on any arch.
-        */
-       total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
-       rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
-       return (int)((rss * 100L) / total);
-}
-
-/*
  * prev_priority control...this will be used in memory reclaim path.
  */
 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
@@ -500,8 +547,8 @@ static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_
        unsigned long gb;
        unsigned long inactive_ratio;
 
-       inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
-       active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
+       inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
+       active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
 
        gb = (inactive + active) >> (30 - PAGE_SHIFT);
        if (gb)
@@ -535,6 +582,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
        return 0;
 }
 
+int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
+{
+       unsigned long active;
+       unsigned long inactive;
+
+       inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
+       active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
+
+       return (active > inactive);
+}
+
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
                                       struct zone *zone,
                                       enum lru_list lru)
@@ -597,7 +655,8 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
        int nid = z->zone_pgdat->node_id;
        int zid = zone_idx(z);
        struct mem_cgroup_per_zone *mz;
-       int lru = LRU_FILE * !!file + !!active;
+       int lru = LRU_FILE * file + active;
+       int ret;
 
        BUG_ON(!mem_cont);
        mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
@@ -615,9 +674,19 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                        continue;
 
                scan++;
-               if (__isolate_lru_page(page, mode, file) == 0) {
+               ret = __isolate_lru_page(page, mode, file);
+               switch (ret) {
+               case 0:
                        list_move(&page->lru, dst);
+                       mem_cgroup_del_lru(page);
                        nr_taken++;
+                       break;
+               case -EBUSY:
+                       /* we don't affect global LRU but rotate in our LRU */
+                       mem_cgroup_rotate_lru_list(page, page_lru(page));
+                       break;
+               default:
+                       break;
                }
        }
 
@@ -628,172 +697,206 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 #define mem_cgroup_from_res_counter(counter, member)   \
        container_of(counter, struct mem_cgroup, member)
 
-/*
- * This routine finds the DFS walk successor. This routine should be
- * called with hierarchy_mutex held
- */
-static struct mem_cgroup *
-__mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
 {
-       struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
-
-       curr_cgroup = curr->css.cgroup;
-       root_cgroup = root_mem->css.cgroup;
+       if (do_swap_account) {
+               if (res_counter_check_under_limit(&mem->res) &&
+                       res_counter_check_under_limit(&mem->memsw))
+                       return true;
+       } else
+               if (res_counter_check_under_limit(&mem->res))
+                       return true;
+       return false;
+}
 
-       if (!list_empty(&curr_cgroup->children)) {
-               /*
-                * Walk down to children
-                */
-               cgroup = list_entry(curr_cgroup->children.next,
-                                               struct cgroup, sibling);
-               curr = mem_cgroup_from_cont(cgroup);
-               goto done;
-       }
+static unsigned int get_swappiness(struct mem_cgroup *memcg)
+{
+       struct cgroup *cgrp = memcg->css.cgroup;
+       unsigned int swappiness;
 
-visit_parent:
-       if (curr_cgroup == root_cgroup) {
-               /* caller handles NULL case */
-               curr = NULL;
-               goto done;
-       }
+       /* root ? */
+       if (cgrp->parent == NULL)
+               return vm_swappiness;
 
-       /*
-        * Goto next sibling
-        */
-       if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
-               cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
-                                               sibling);
-               curr = mem_cgroup_from_cont(cgroup);
-               goto done;
-       }
+       spin_lock(&memcg->reclaim_param_lock);
+       swappiness = memcg->swappiness;
+       spin_unlock(&memcg->reclaim_param_lock);
 
-       /*
-        * Go up to next parent and next parent's sibling if need be
-        */
-       curr_cgroup = curr_cgroup->parent;
-       goto visit_parent;
+       return swappiness;
+}
 
-done:
-       return curr;
+static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
+{
+       int *val = data;
+       (*val)++;
+       return 0;
 }
 
-/*
- * Visit the first child (need not be the first child as per the ordering
- * of the cgroup list, since we track last_scanned_child) of @mem and use
- * that to reclaim free pages from.
+/**
+ * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
+ * @memcg: The memory cgroup that went over limit
+ * @p: Task that is going to be killed
+ *
+ * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
+ * enabled
  */
-static struct mem_cgroup *
-mem_cgroup_get_next_node(struct mem_cgroup *root_mem)
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
-       struct cgroup *cgroup;
-       struct mem_cgroup *orig, *next;
-       bool obsolete;
-
+       struct cgroup *task_cgrp;
+       struct cgroup *mem_cgrp;
        /*
-        * Scan all children under the mem_cgroup mem
+        * Need a buffer in BSS, can't rely on allocations. The code relies
+        * on the assumption that OOM is serialized for the memory controller.
+        * If this assumption is broken, revisit this code.
         */
-       mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+       static char memcg_name[PATH_MAX];
+       int ret;
 
-       orig = root_mem->last_scanned_child;
-       obsolete = mem_cgroup_is_obsolete(orig);
+       if (!memcg)
+               return;
 
-       if (list_empty(&root_mem->css.cgroup->children)) {
+
+       rcu_read_lock();
+
+       mem_cgrp = memcg->css.cgroup;
+       task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
+
+       ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
+       if (ret < 0) {
                /*
-                * root_mem might have children before and last_scanned_child
-                * may point to one of them. We put it later.
+                * Unfortunately, we are unable to convert to a useful name,
+                * but we'll still print out the usage information.
                 */
-               if (orig)
-                       VM_BUG_ON(!obsolete);
-               next = NULL;
+               rcu_read_unlock();
                goto done;
        }
+       rcu_read_unlock();
 
-       if (!orig || obsolete) {
-               cgroup = list_first_entry(&root_mem->css.cgroup->children,
-                               struct cgroup, sibling);
-               next = mem_cgroup_from_cont(cgroup);
-       } else
-               next = __mem_cgroup_get_next_node(orig, root_mem);
+       printk(KERN_INFO "Task in %s killed", memcg_name);
+
+       rcu_read_lock();
+       ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
+       if (ret < 0) {
+               rcu_read_unlock();
+               goto done;
+       }
+       rcu_read_unlock();
 
+       /*
+        * Continues from above, so we don't need a new KERN_ level.
+        */
+       printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
 done:
-       if (next)
-               mem_cgroup_get(next);
-       root_mem->last_scanned_child = next;
-       if (orig)
-               mem_cgroup_put(orig);
-       mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
-       return (next) ? next : root_mem;
+
+       printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
+               res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
+               res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
+               res_counter_read_u64(&memcg->res, RES_FAILCNT));
+       printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
+               "failcnt %llu\n",
+               res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
+               res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
+               res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
 }
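
Taken together, the printk() calls above emit one continued line naming the victim task's cgroup and the over-limit cgroup, followed by the two counter dumps. An illustrative rendering (paths and numbers invented; the memory+swap limit shown is the unlimited default):

	Task in /mygroup killed as a result of limit of /mygroup
	memory: usage 1048576kB, limit 1048576kB, failcnt 12
	memory+swap: usage 1048576kB, limit 9007199254740991kB, failcnt 0
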
 
-static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+/*
+ * This function returns the number of memcgs under the hierarchy tree.
+ * Returns 1 (self count) if there are no children.
+ */
+static int mem_cgroup_count_children(struct mem_cgroup *mem)
 {
-       if (do_swap_account) {
-               if (res_counter_check_under_limit(&mem->res) &&
-                       res_counter_check_under_limit(&mem->memsw))
-                       return true;
-       } else
-               if (res_counter_check_under_limit(&mem->res))
-                       return true;
-       return false;
+       int num = 0;
+       mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+       return num;
 }
 
-static unsigned int get_swappiness(struct mem_cgroup *memcg)
+/*
+ * Visit the first child (need not be the first child as per the ordering
+ * of the cgroup list, since we track last_scanned_child) of @mem and use
+ * that to reclaim free pages from.
+ */
+static struct mem_cgroup *
+mem_cgroup_select_victim(struct mem_cgroup *root_mem)
 {
-       struct cgroup *cgrp = memcg->css.cgroup;
-       unsigned int swappiness;
+       struct mem_cgroup *ret = NULL;
+       struct cgroup_subsys_state *css;
+       int nextid, found;
 
-       /* root ? */
-       if (cgrp->parent == NULL)
-               return vm_swappiness;
+       if (!root_mem->use_hierarchy) {
+               css_get(&root_mem->css);
+               ret = root_mem;
+       }
 
-       spin_lock(&memcg->reclaim_param_lock);
-       swappiness = memcg->swappiness;
-       spin_unlock(&memcg->reclaim_param_lock);
+       while (!ret) {
+               rcu_read_lock();
+               nextid = root_mem->last_scanned_child + 1;
+               css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
+                                  &found);
+               if (css && css_tryget(css))
+                       ret = container_of(css, struct mem_cgroup, css);
+
+               rcu_read_unlock();
+               /* Updates scanning parameter */
+               spin_lock(&root_mem->reclaim_param_lock);
+               if (!css) {
+                       /* this means start scan from ID:1 */
+                       root_mem->last_scanned_child = 0;
+               } else
+                       root_mem->last_scanned_child = found;
+               spin_unlock(&root_mem->reclaim_param_lock);
+       }
 
-       return swappiness;
+       return ret;
 }
 
 /*
- * Dance down the hierarchy if needed to reclaim memory. We remember the
- * last child we reclaimed from, so that we don't end up penalizing
- * one child extensively based on its position in the children list.
+ * Scan the hierarchy if needed to reclaim memory. We remember the last child
+ * we reclaimed from, so that we don't end up penalizing one child extensively
+ * based on its position in the children list.
  *
  * root_mem is the original ancestor that we've been reclaim from.
+ *
+ * We give up and return to the caller when we visit root_mem twice
+ * (other groups can be removed while we're walking...).
+ *
+ * If shrink==true, this returns immediately to avoid freeing too much.
  */
 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
-                                               gfp_t gfp_mask, bool noswap)
-{
-       struct mem_cgroup *next_mem;
-       int ret = 0;
-
-       /*
-        * Reclaim unconditionally and don't check for return value.
-        * We need to reclaim in the current group and down the tree.
-        * One might think about checking for children before reclaiming,
-        * but there might be left over accounting, even after children
-        * have left.
-        */
-       ret += try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
-                                          get_swappiness(root_mem));
-       if (mem_cgroup_check_under_limit(root_mem))
-               return 1;       /* indicate reclaim has succeeded */
-       if (!root_mem->use_hierarchy)
-               return ret;
-
-       next_mem = mem_cgroup_get_next_node(root_mem);
-
-       while (next_mem != root_mem) {
-               if (mem_cgroup_is_obsolete(next_mem)) {
-                       next_mem = mem_cgroup_get_next_node(root_mem);
+                                  gfp_t gfp_mask, bool noswap, bool shrink)
+{
+       struct mem_cgroup *victim;
+       int ret, total = 0;
+       int loop = 0;
+
+       /* If memsw_is_minimum==1, swap-out is useless. */
+       if (root_mem->memsw_is_minimum)
+               noswap = true;
+
+       while (loop < 2) {
+               victim = mem_cgroup_select_victim(root_mem);
+               if (victim == root_mem)
+                       loop++;
+               if (!mem_cgroup_local_usage(&victim->stat)) {
+                       /* this cgroup's local usage == 0 */
+                       css_put(&victim->css);
                        continue;
                }
-               ret += try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
-                                                  get_swappiness(next_mem));
+               /* we use swappiness of local cgroup */
+               ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
+                                                  get_swappiness(victim));
+               css_put(&victim->css);
+               /*
+                * At shrinking usage, we can't check whether we should stop
+                * here or reclaim more; that depends on the caller.
+                * last_scanned_child is enough to keep fairness under the tree.
+                */
+               if (shrink)
+                       return ret;
+               total += ret;
                if (mem_cgroup_check_under_limit(root_mem))
-                       return 1;       /* indicate reclaim has succeeded */
-               next_mem = mem_cgroup_get_next_node(root_mem);
+                       return 1 + total;
        }
-       return ret;
+       return total;
 }
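
The shrink flag gives this entry point two calling conventions, both visible later in the patch; distilled here as call-site excerpts rather than new code:

	/* Charge path: reclaim until back under limit; retry while the
	 * return value is non-zero (see __mem_cgroup_try_charge below). */
	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
						noswap, false);

	/* Limit resize: one fairness-preserving pass per retry; progress
	 * is judged by re-reading RES_USAGE (see mem_cgroup_resize_limit
	 * below). */
	progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
						   false, true);
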
 
 bool mem_cgroup_oom_called(struct task_struct *task)
@@ -812,6 +915,57 @@ bool mem_cgroup_oom_called(struct task_struct *task)
        rcu_read_unlock();
        return ret;
 }
+
+static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
+{
+       mem->last_oom_jiffies = jiffies;
+       return 0;
+}
+
+static void record_last_oom(struct mem_cgroup *mem)
+{
+       mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
+}
+
+/*
+ * Currently used to update mapped file statistics, but the routine can be
+ * generalized to update other statistics as well.
+ */
+void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+{
+       struct mem_cgroup *mem;
+       struct mem_cgroup_stat *stat;
+       struct mem_cgroup_stat_cpu *cpustat;
+       int cpu;
+       struct page_cgroup *pc;
+
+       if (!page_is_file_cache(page))
+               return;
+
+       pc = lookup_page_cgroup(page);
+       if (unlikely(!pc))
+               return;
+
+       lock_page_cgroup(pc);
+       mem = pc->mem_cgroup;
+       if (!mem)
+               goto done;
+
+       if (!PageCgroupUsed(pc))
+               goto done;
+
+       /*
+        * Preemption is already disabled, we don't need get_cpu()
+        */
+       cpu = smp_processor_id();
+       stat = &mem->stat;
+       cpustat = &stat->cpustat[cpu];
+
+       __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+done:
+       unlock_page_cgroup(pc);
+}
+
 /*
  * Unlike exported interface, "oom" parameter is added. if oom==true,
  * oom-killer can be invoked.
@@ -846,7 +1000,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
        if (unlikely(!mem))
                return 0;
 
-       VM_BUG_ON(mem_cgroup_is_obsolete(mem));
+       VM_BUG_ON(css_is_removed(&mem->css));
 
        while (1) {
                int ret;
@@ -874,7 +1028,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        goto nomem;
 
                ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
-                                                       noswap);
+                                                       noswap, false);
                if (ret)
                        continue;
 
@@ -894,7 +1048,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                                mutex_lock(&memcg_tasklist);
                                mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
                                mutex_unlock(&memcg_tasklist);
-                               mem_over_limit->last_oom_jiffies = jiffies;
+                               record_last_oom(mem_over_limit);
                        }
                        goto nomem;
                }
@@ -905,20 +1059,54 @@ nomem:
        return -ENOMEM;
 }
 
+
+/*
+ * A helper function to get a mem_cgroup from an ID. Must be called under
+ * rcu_read_lock(). The caller must check css_is_removed() or similar if
+ * that is a concern (dropping a refcnt from swap can be called against a
+ * removed memcg).
+ */
+static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
+{
+       struct cgroup_subsys_state *css;
+
+       /* ID 0 is unused ID */
+       if (!id)
+               return NULL;
+       css = css_lookup(&mem_cgroup_subsys, id);
+       if (!css)
+               return NULL;
+       return container_of(css, struct mem_cgroup, css);
+}
+
 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
 {
        struct mem_cgroup *mem;
+       struct page_cgroup *pc;
+       unsigned short id;
        swp_entry_t ent;
 
+       VM_BUG_ON(!PageLocked(page));
+
        if (!PageSwapCache(page))
                return NULL;
 
-       ent.val = page_private(page);
-       mem = lookup_swap_cgroup(ent);
-       if (!mem)
-               return NULL;
-       if (!css_tryget(&mem->css))
-               return NULL;
+       pc = lookup_page_cgroup(page);
+       lock_page_cgroup(pc);
+       if (PageCgroupUsed(pc)) {
+               mem = pc->mem_cgroup;
+               if (mem && !css_tryget(&mem->css))
+                       mem = NULL;
+       } else {
+               ent.val = page_private(page);
+               id = lookup_swap_cgroup(ent);
+               rcu_read_lock();
+               mem = mem_cgroup_lookup(id);
+               if (mem && !css_tryget(&mem->css))
+                       mem = NULL;
+               rcu_read_unlock();
+       }
+       unlock_page_cgroup(pc);
        return mem;
 }
 
@@ -944,9 +1132,29 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
                css_put(&mem->css);
                return;
        }
+
        pc->mem_cgroup = mem;
+       /*
+        * We access a page_cgroup asynchronously without lock_page_cgroup().
+        * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
+        * is accessed after testing the USED bit. To make pc->mem_cgroup
+        * visible before the USED bit is set, we need a memory barrier here.
+        * See mem_cgroup_add_lru_list(), etc.
+        */
        smp_wmb();
-       pc->flags = pcg_default_flags[ctype];
+       switch (ctype) {
+       case MEM_CGROUP_CHARGE_TYPE_CACHE:
+       case MEM_CGROUP_CHARGE_TYPE_SHMEM:
+               SetPageCgroupCache(pc);
+               SetPageCgroupUsed(pc);
+               break;
+       case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+               ClearPageCgroupCache(pc);
+               SetPageCgroupUsed(pc);
+               break;
+       default:
+               break;
+       }
 
        mem_cgroup_charge_statistics(mem, pc, true);
 
@@ -975,6 +1183,10 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
        struct mem_cgroup_per_zone *from_mz, *to_mz;
        int nid, zid;
        int ret = -EBUSY;
+       struct page *page;
+       int cpu;
+       struct mem_cgroup_stat *stat;
+       struct mem_cgroup_stat_cpu *cpustat;
 
        VM_BUG_ON(from == to);
        VM_BUG_ON(PageLRU(pc->page));
@@ -995,6 +1207,23 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
 
        res_counter_uncharge(&from->res, PAGE_SIZE);
        mem_cgroup_charge_statistics(from, pc, false);
+
+       page = pc->page;
+       if (page_is_file_cache(page) && page_mapped(page)) {
+               cpu = smp_processor_id();
+               /* Update mapped_file data for mem_cgroup "from" */
+               stat = &from->stat;
+               cpustat = &stat->cpustat[cpu];
+               __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+                                               -1);
+
+               /* Update mapped_file data for mem_cgroup "to" */
+               stat = &to->stat;
+               cpustat = &stat->cpustat[cpu];
+               __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+                                               1);
+       }
+
        if (do_swap_account)
                res_counter_uncharge(&from->memsw, PAGE_SIZE);
        css_put(&from->css);
@@ -1005,6 +1234,12 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
        ret = 0;
 out:
        unlock_page_cgroup(pc);
+       /*
+        * We charge against "to", which may not have any tasks; "to" can
+        * then be under rmdir(). But in the current implementation, the only
+        * caller of this function is force_empty(), and it is guaranteed
+        * that "to" is never removed. So we don't check the rmdir status here.
+        */
        return ret;
 }
 
@@ -1117,6 +1352,10 @@ int mem_cgroup_newpage_charge(struct page *page,
                                MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+                                       enum charge_type ctype);
+
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
@@ -1153,16 +1392,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                unlock_page_cgroup(pc);
        }
 
-       if (do_swap_account && PageSwapCache(page)) {
-               mem = try_get_mem_cgroup_from_swapcache(page);
-               if (mem)
-                       mm = NULL;
-                 else
-                       mem = NULL;
-               /* SwapCache may be still linked to LRU now. */
-               mem_cgroup_lru_del_before_commit_swapcache(page);
-       }
-
        if (unlikely(!mm && !mem))
                mm = &init_mm;
 
@@ -1170,22 +1399,16 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
 
-       ret = mem_cgroup_charge_common(page, mm, gfp_mask,
-                               MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
-       if (mem)
-               css_put(&mem->css);
-       if (PageSwapCache(page))
-               mem_cgroup_lru_add_after_commit_swapcache(page);
+       /* shmem */
+       if (PageSwapCache(page)) {
+               ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+               if (!ret)
+                       __mem_cgroup_commit_charge_swapin(page, mem,
+                                       MEM_CGROUP_CHARGE_TYPE_SHMEM);
+       } else
+               ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+                                       MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
 
-       if (do_swap_account && !ret && PageSwapCache(page)) {
-               swp_entry_t ent = {.val = page_private(page)};
-               /* avoid double counting */
-               mem = swap_cgroup_record(ent, NULL);
-               if (mem) {
-                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-                       mem_cgroup_put(mem);
-               }
-       }
        return ret;
 }
 
@@ -1228,7 +1451,9 @@ charge_cur_mm:
        return __mem_cgroup_try_charge(mm, mask, ptr, true);
 }
 
-void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+                                       enum charge_type ctype)
 {
        struct page_cgroup *pc;
 
@@ -1236,9 +1461,10 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
                return;
        if (!ptr)
                return;
+       cgroup_exclude_rmdir(&ptr->css);
        pc = lookup_page_cgroup(page);
        mem_cgroup_lru_del_before_commit_swapcache(page);
-       __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+       __mem_cgroup_commit_charge(ptr, pc, ctype);
        mem_cgroup_lru_add_after_commit_swapcache(page);
        /*
         * Now swap is on-memory. This means this page may be
@@ -1249,16 +1475,34 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
         */
        if (do_swap_account && PageSwapCache(page)) {
                swp_entry_t ent = {.val = page_private(page)};
+               unsigned short id;
                struct mem_cgroup *memcg;
-               memcg = swap_cgroup_record(ent, NULL);
+
+               id = swap_cgroup_record(ent, 0);
+               rcu_read_lock();
+               memcg = mem_cgroup_lookup(id);
                if (memcg) {
+                       /*
+                        * This recorded memcg may be an obsolete one, so
+                        * avoid calling css_tryget().
+                        */
                        res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
                        mem_cgroup_put(memcg);
                }
-
+               rcu_read_unlock();
        }
-       /* add this page(page_cgroup) to the LRU we want. */
+       /*
+        * At swapin, we may charge a cgroup which has no tasks, so
+        * rmdir()->pre_destroy() can be called while we do this charge.
+        * In that case, we need to call pre_destroy() again; check it here.
+        */
+       cgroup_release_and_wakeup_rmdir(&ptr->css);
+}
 
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+       __mem_cgroup_commit_charge_swapin(page, ptr,
+                                       MEM_CGROUP_CHARGE_TYPE_MAPPED);
 }
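
Because a swapin charge can be committed to a cgroup that has no tasks, the commit path above brackets its work between cgroup_exclude_rmdir() and cgroup_release_and_wakeup_rmdir(); mem_cgroup_end_migration() below gains the same bracket. The pattern, distilled as a sketch:

	/* Sketch: hold off a concurrent rmdir()'s pre_destroy() while a
	 * charge may make an apparently-empty cgroup non-empty, then wake
	 * the waiter so pre_destroy() can run again. */
	cgroup_exclude_rmdir(&mem->css);
	/* ... commit the charge ... */
	cgroup_release_and_wakeup_rmdir(&mem->css);
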
 
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
@@ -1306,6 +1550,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 
        switch (ctype) {
        case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+       case MEM_CGROUP_CHARGE_TYPE_DROP:
                if (page_mapped(page))
                        goto unlock_out;
                break;
@@ -1323,8 +1568,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        res_counter_uncharge(&mem->res, PAGE_SIZE);
        if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
                res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-
        mem_cgroup_charge_statistics(mem, pc, false);
+
        ClearPageCgroupUsed(pc);
        /*
         * pc->mem_cgroup is not cleared here. It will be accessed when it's
@@ -1364,24 +1609,31 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
        __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
+#ifdef CONFIG_SWAP
 /*
- * called from __delete_from_swap_cache() and drop "page" account.
+ * called after __delete_from_swap_cache() and drop "page" account.
  * memcg information is recorded to swap_cgroup of "ent"
  */
-void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
 {
        struct mem_cgroup *memcg;
+       int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
+
+       if (!swapout) /* this was a swap cache but the swap is unused! */
+               ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
+
+       memcg = __mem_cgroup_uncharge_common(page, ctype);
 
-       memcg = __mem_cgroup_uncharge_common(page,
-                                       MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
        /* record memcg information */
-       if (do_swap_account && memcg) {
-               swap_cgroup_record(ent, memcg);
+       if (do_swap_account && swapout && memcg) {
+               swap_cgroup_record(ent, css_id(&memcg->css));
                mem_cgroup_get(memcg);
        }
-       if (memcg)
+       if (swapout && memcg)
                css_put(&memcg->css);
 }
+#endif
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 /*
@@ -1391,15 +1643,23 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
 void mem_cgroup_uncharge_swap(swp_entry_t ent)
 {
        struct mem_cgroup *memcg;
+       unsigned short id;
 
        if (!do_swap_account)
                return;
 
-       memcg = swap_cgroup_record(ent, NULL);
+       id = swap_cgroup_record(ent, 0);
+       rcu_read_lock();
+       memcg = mem_cgroup_lookup(id);
        if (memcg) {
+               /*
+                * We uncharge this because the swap entry is freed. This
+                * memcg may be an obsolete one, so we avoid calling css_tryget().
+                */
                res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
                mem_cgroup_put(memcg);
        }
+       rcu_read_unlock();
 }
 #endif
 
@@ -1442,7 +1702,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
 
        if (!mem)
                return;
-
+       cgroup_exclude_rmdir(&mem->css);
        /* at migration success, oldpage->mapping is NULL. */
        if (oldpage->mapping) {
                target = oldpage;
@@ -1482,39 +1742,37 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
         */
        if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
                mem_cgroup_uncharge_page(target);
+       /*
+        * At migration, we may charge a cgroup which has no tasks, so
+        * rmdir()->pre_destroy() can be called while we do this charge.
+        * In that case, we need to call pre_destroy() again; check it here.
+        */
+       cgroup_release_and_wakeup_rmdir(&mem->css);
 }
 
 /*
- * A call to try to shrink memory usage under specified resource controller.
- * This is typically used for page reclaiming for shmem for reducing side
- * effect of page allocation from shmem, which is used by some mem_cgroup.
+ * A call to try to shrink memory usage on charge failure at shmem's swapin.
+ * Calling hierarchical_reclaim is not enough because we should update
+ * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
+ * Moreover, considering hierarchy, we should reclaim from the mem_over_limit,
+ * not from the memcg which this page would be charged to.
+ * try_charge_swapin does all of this work properly.
  */
-int mem_cgroup_shrink_usage(struct page *page,
+int mem_cgroup_shmem_charge_fallback(struct page *page,
                            struct mm_struct *mm,
                            gfp_t gfp_mask)
 {
        struct mem_cgroup *mem = NULL;
-       int progress = 0;
-       int retry = MEM_CGROUP_RECLAIM_RETRIES;
+       int ret;
 
        if (mem_cgroup_disabled())
                return 0;
-       if (page)
-               mem = try_get_mem_cgroup_from_swapcache(page);
-       if (!mem && mm)
-               mem = try_get_mem_cgroup_from_mm(mm);
-       if (unlikely(!mem))
-               return 0;
 
-       do {
-               progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
-               progress += mem_cgroup_check_under_limit(mem);
-       } while (!progress && --retry);
+       ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+       if (!ret)
+               mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
 
-       css_put(&mem->css);
-       if (!retry)
-               return -ENOMEM;
-       return 0;
+       return ret;
 }
 
 static DEFINE_MUTEX(set_limit_mutex);
@@ -1522,11 +1780,21 @@ static DEFINE_MUTEX(set_limit_mutex);
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                                unsigned long long val)
 {
-
-       int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+       int retry_count;
        int progress;
        u64 memswlimit;
        int ret = 0;
+       int children = mem_cgroup_count_children(memcg);
+       u64 curusage, oldusage;
+
+       /*
+        * To keep hierarchical_reclaim simple, how long we should retry
+        * depends on the caller. We set our retry count to be a function
+        * of the number of children we should visit in this loop.
+        */
+       retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
+
+       oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
 
        while (retry_count) {
                if (signal_pending(current)) {
@@ -1546,29 +1814,41 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                        break;
                }
                ret = res_counter_set_limit(&memcg->res, val);
+               if (!ret) {
+                       if (memswlimit == val)
+                               memcg->memsw_is_minimum = true;
+                       else
+                               memcg->memsw_is_minimum = false;
+               }
                mutex_unlock(&set_limit_mutex);
 
                if (!ret)
                        break;
 
                progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
-                                                          false);
-               if (!progress)                  retry_count--;
+                                                  false, true);
+               curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
+               /* Usage is reduced ? */
+               if (curusage >= oldusage)
+                       retry_count--;
+               else
+                       oldusage = curusage;
        }
 
        return ret;
 }
 
-int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
-                               unsigned long long val)
+static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
+                                       unsigned long long val)
 {
-       int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+       int retry_count;
        u64 memlimit, oldusage, curusage;
-       int ret;
-
-       if (!do_swap_account)
-               return -EINVAL;
+       int children = mem_cgroup_count_children(memcg);
+       int ret = -EBUSY;
 
+       /* see mem_cgroup_resize_limit */
+       retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
+       oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
        while (retry_count) {
                if (signal_pending(current)) {
                        ret = -EINTR;
@@ -1587,16 +1867,24 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                        break;
                }
                ret = res_counter_set_limit(&memcg->memsw, val);
+               if (!ret) {
+                       if (memlimit == val)
+                               memcg->memsw_is_minimum = true;
+                       else
+                               memcg->memsw_is_minimum = false;
+               }
                mutex_unlock(&set_limit_mutex);
 
                if (!ret)
                        break;
 
-               oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-               mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
+               mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
                curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+               /* Usage is reduced ? */
                if (curusage >= oldusage)
                        retry_count--;
+               else
+                       oldusage = curusage;
        }
        return ret;
 }
@@ -1684,7 +1972,7 @@ move_account:
                /* This is for making all *used* pages to be on LRU. */
                lru_add_drain_all();
                ret = 0;
-               for_each_node_state(node, N_POSSIBLE) {
+               for_each_node_state(node, N_HIGH_MEMORY) {
                        for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
                                enum lru_list l;
                                for_each_lru(l) {
@@ -1729,7 +2017,7 @@ try_to_free:
                if (!progress) {
                        nr_retries--;
                        /* maybe some writeback is necessary */
-                       congestion_wait(WRITE, HZ/10);
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
                }
 
        }
@@ -1798,8 +2086,7 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
                val = res_counter_read_u64(&mem->res, name);
                break;
        case _MEMSWAP:
-               if (do_swap_account)
-                       val = res_counter_read_u64(&mem->memsw, name);
+               val = res_counter_read_u64(&mem->memsw, name);
                break;
        default:
                BUG();
@@ -1823,6 +2110,10 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
        name = MEMFILE_ATTR(cft->private);
        switch (name) {
        case RES_LIMIT:
+               if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
+                       ret = -EINVAL;
+                       break;
+               }
                /* This function does all necessary parse...reuse it */
                ret = res_counter_memparse_write_strategy(buffer, &val);
                if (ret)
@@ -1832,6 +2123,20 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                else
                        ret = mem_cgroup_resize_memsw_limit(memcg, val);
                break;
+       case RES_SOFT_LIMIT:
+               ret = res_counter_memparse_write_strategy(buffer, &val);
+               if (ret)
+                       break;
+               /*
+                * For memsw, soft limits are hard to implement in terms
+                * of semantics; for now, we support soft limits only for
+                * memory control without swap.
+                */
+               if (type == _MEM)
+                       ret = res_counter_set_soft_limit(&memcg->res, val);
+               else
+                       ret = -EINVAL;
+               break;
        default:
                ret = -EINVAL; /* should be BUG() ? */
                break;
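
From user space, the new file behaves like the existing limit files: the value is parsed by res_counter_memparse_write_strategy(), so suffixed strings work, and only the _MEM variant accepts it. A minimal sketch, assuming the controller is mounted at /cgroup/memory with a group "A" (paths illustrative):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* illustrative path; depends on the local cgroup mount */
		const char *f = "/cgroup/memory/A/memory.soft_limit_in_bytes";
		const char *val = "256M";	/* memparse-style suffix */
		int fd = open(f, O_WRONLY);

		if (fd < 0 || write(fd, val, strlen(val)) < 0)
			perror(f);
		if (fd >= 0)
			close(fd);
		return 0;
	}
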
@@ -1892,54 +2197,94 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
        return 0;
 }
 
-static const struct mem_cgroup_stat_desc {
-       const char *msg;
-       u64 unit;
-} mem_cgroup_stat_desc[] = {
-       [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
-       [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
-       [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
-       [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
+
+/* For read statistics */
+enum {
+       MCS_CACHE,
+       MCS_RSS,
+       MCS_MAPPED_FILE,
+       MCS_PGPGIN,
+       MCS_PGPGOUT,
+       MCS_INACTIVE_ANON,
+       MCS_ACTIVE_ANON,
+       MCS_INACTIVE_FILE,
+       MCS_ACTIVE_FILE,
+       MCS_UNEVICTABLE,
+       NR_MCS_STAT,
 };
 
+struct mcs_total_stat {
+       s64 stat[NR_MCS_STAT];
+};
+
+struct {
+       char *local_name;
+       char *total_name;
+} memcg_stat_strings[NR_MCS_STAT] = {
+       {"cache", "total_cache"},
+       {"rss", "total_rss"},
+       {"mapped_file", "total_mapped_file"},
+       {"pgpgin", "total_pgpgin"},
+       {"pgpgout", "total_pgpgout"},
+       {"inactive_anon", "total_inactive_anon"},
+       {"active_anon", "total_active_anon"},
+       {"inactive_file", "total_inactive_file"},
+       {"active_file", "total_active_file"},
+       {"unevictable", "total_unevictable"}
+};
+
+
+static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
+{
+       struct mcs_total_stat *s = data;
+       s64 val;
+
+       /* per cpu stat */
+       val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
+       s->stat[MCS_CACHE] += val * PAGE_SIZE;
+       val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+       s->stat[MCS_RSS] += val * PAGE_SIZE;
+       val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
+       s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
+       val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
+       s->stat[MCS_PGPGIN] += val;
+       val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
+       s->stat[MCS_PGPGOUT] += val;
+
+       /* per zone stat */
+       val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
+       s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
+       val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
+       s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
+       val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
+       s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
+       val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
+       s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
+       val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
+       s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
+       return 0;
+}
+
+static void
+mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
+{
+       mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
+}
+
 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
                                 struct cgroup_map_cb *cb)
 {
        struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
-       struct mem_cgroup_stat *stat = &mem_cont->stat;
+       struct mcs_total_stat mystat;
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
-               s64 val;
+       memset(&mystat, 0, sizeof(mystat));
+       mem_cgroup_get_local_stat(mem_cont, &mystat);
 
-               val = mem_cgroup_read_stat(stat, i);
-               val *= mem_cgroup_stat_desc[i].unit;
-               cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
-       }
-       /* showing # of active pages */
-       {
-               unsigned long active_anon, inactive_anon;
-               unsigned long active_file, inactive_file;
-               unsigned long unevictable;
-
-               inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
-                                               LRU_INACTIVE_ANON);
-               active_anon = mem_cgroup_get_all_zonestat(mem_cont,
-                                               LRU_ACTIVE_ANON);
-               inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
-                                               LRU_INACTIVE_FILE);
-               active_file = mem_cgroup_get_all_zonestat(mem_cont,
-                                               LRU_ACTIVE_FILE);
-               unevictable = mem_cgroup_get_all_zonestat(mem_cont,
-                                                       LRU_UNEVICTABLE);
-
-               cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
-               cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
-               cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
-               cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
-               cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
+       for (i = 0; i < NR_MCS_STAT; i++)
+               cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
 
-       }
+       /* Hierarchical information */
        {
                unsigned long long limit, memsw_limit;
                memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
@@ -1948,6 +2293,12 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
                        cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
        }
 
+       memset(&mystat, 0, sizeof(mystat));
+       mem_cgroup_get_total_stat(mem_cont, &mystat);
+       for (i = 0; i < NR_MCS_STAT; i++)
+               cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
+
+
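
With use_hierarchy enabled, memory.stat therefore reports each key twice: the local value first, then the hierarchical limits, then the total_-prefixed subtree sum. An illustrative excerpt (values invented; byte units, except the raw pgpgin/pgpgout event counts):

	cache 4096
	rss 8192
	mapped_file 0
	pgpgin 3
	pgpgout 1
	...
	hierarchical_memory_limit 268435456
	total_cache 1019904
	total_rss 4448256
	...
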
 #ifdef CONFIG_DEBUG_VM
        cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
 
@@ -1992,6 +2343,7 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
        struct mem_cgroup *parent;
+
        if (val > 100)
                return -EINVAL;
 
@@ -1999,15 +2351,22 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
                return -EINVAL;
 
        parent = mem_cgroup_from_cont(cgrp->parent);
+
+       cgroup_lock();
+
        /* If under hierarchy, only empty-root can set this value */
        if ((parent->use_hierarchy) ||
-           (memcg->use_hierarchy && !list_empty(&cgrp->children)))
+           (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
+               cgroup_unlock();
                return -EINVAL;
+       }
 
        spin_lock(&memcg->reclaim_param_lock);
        memcg->swappiness = val;
        spin_unlock(&memcg->reclaim_param_lock);
 
+       cgroup_unlock();
+
        return 0;
 }
 
@@ -2031,6 +2390,12 @@ static struct cftype mem_cgroup_files[] = {
                .read_u64 = mem_cgroup_read,
        },
        {
+               .name = "soft_limit_in_bytes",
+               .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
+               .write_string = mem_cgroup_write,
+               .read_u64 = mem_cgroup_read,
+       },
+       {
                .name = "failcnt",
                .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
                .trigger = mem_cgroup_reset,
@@ -2169,6 +2534,8 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
 {
        int node;
 
+       free_css_id(&mem_cgroup_subsys, &mem->css);
+
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
 
@@ -2185,10 +2552,23 @@ static void mem_cgroup_get(struct mem_cgroup *mem)
 
 static void mem_cgroup_put(struct mem_cgroup *mem)
 {
-       if (atomic_dec_and_test(&mem->refcnt))
+       if (atomic_dec_and_test(&mem->refcnt)) {
+               struct mem_cgroup *parent = parent_mem_cgroup(mem);
                __mem_cgroup_free(mem);
+               if (parent)
+                       mem_cgroup_put(parent);
+       }
 }
 
+/*
+ * Returns the parent mem_cgroup in the memcg hierarchy, with hierarchy enabled.
+ */
+static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
+{
+       if (!mem->res.parent)
+               return NULL;
+       return mem_cgroup_from_res_counter(mem->res.parent, res);
+}
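
This recursive put pairs with the mem_cgroup_get(parent) taken in mem_cgroup_create() below, so a chain of empty ancestors kept alive only by swap references unwinds once the last reference is dropped. The pairing, sketched as a comment (names from this patch):

	/*
	 * create(child under parent):  mem_cgroup_get(parent)
	 * swapout records child's id:  mem_cgroup_get(child)
	 * swap entry freed:            mem_cgroup_put(child)
	 * last put on child:           __mem_cgroup_free(child), then
	 *                              mem_cgroup_put(parent) -- may cascade
	 */
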
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 static void __init enable_swap_cgroup(void)
@@ -2206,11 +2586,12 @@ static struct cgroup_subsys_state * __ref
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
        struct mem_cgroup *mem, *parent;
+       long error = -ENOMEM;
        int node;
 
        mem = mem_cgroup_alloc();
        if (!mem)
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(error);
 
        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
@@ -2219,6 +2600,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        if (cont->parent == NULL) {
                enable_swap_cgroup();
                parent = NULL;
+               root_mem_cgroup = mem;
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
                mem->use_hierarchy = parent->use_hierarchy;
@@ -2227,11 +2609,18 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        if (parent && parent->use_hierarchy) {
                res_counter_init(&mem->res, &parent->res);
                res_counter_init(&mem->memsw, &parent->memsw);
+               /*
+                * We increment refcnt of the parent to ensure that we can
+                * safely access it on res_counter_charge/uncharge.
+                * This refcnt will be decremented when freeing this
+                * mem_cgroup (see mem_cgroup_put()).
+                */
+               mem_cgroup_get(parent);
        } else {
                res_counter_init(&mem->res, NULL);
                res_counter_init(&mem->memsw, NULL);
        }
-       mem->last_scanned_child = NULL;
+       mem->last_scanned_child = 0;
        spin_lock_init(&mem->reclaim_param_lock);
 
        if (parent)
@@ -2240,26 +2629,23 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        return &mem->css;
 free_out:
        __mem_cgroup_free(mem);
-       return ERR_PTR(-ENOMEM);
+       root_mem_cgroup = NULL;
+       return ERR_PTR(error);
 }
 
-static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
+static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
                                        struct cgroup *cont)
 {
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-       mem_cgroup_force_empty(mem, false);
+
+       return mem_cgroup_force_empty(mem, false);
 }
 
 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
                                struct cgroup *cont)
 {
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-       struct mem_cgroup *last_scanned_child = mem->last_scanned_child;
 
-       if (last_scanned_child) {
-               VM_BUG_ON(!mem_cgroup_is_obsolete(last_scanned_child));
-               mem_cgroup_put(last_scanned_child);
-       }
        mem_cgroup_put(mem);
 }
 
@@ -2279,7 +2665,8 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *cont,
                                struct cgroup *old_cont,
-                               struct task_struct *p)
+                               struct task_struct *p,
+                               bool threadgroup)
 {
        mutex_lock(&memcg_tasklist);
        /*
@@ -2298,6 +2685,7 @@ struct cgroup_subsys mem_cgroup_subsys = {
        .populate = mem_cgroup_populate,
        .attach = mem_cgroup_move_task,
        .early_init = 0,
+       .use_id = 1,
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP