diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4ad3e6b..e0c2066 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -29,6 +29,7 @@
 #include <linux/rcupdate.h>
 #include <linux/limits.h>
 #include <linux/mutex.h>
+#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/spinlock.h>
@@ -54,6 +55,7 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/
 #endif
 
 static DEFINE_MUTEX(memcg_tasklist);   /* can be hold under cgroup_mutex */
+#define SOFTLIMIT_EVENTS_THRESH (1000)
 
 /*
  * Statistics for memory cgroup.
@@ -67,6 +69,8 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
+       MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */
+       MEM_CGROUP_STAT_SWAPOUT, /* # of pages swapped out */
 
        MEM_CGROUP_STAT_NSTATS,
 };
@@ -79,6 +83,20 @@ struct mem_cgroup_stat {
        struct mem_cgroup_stat_cpu cpustat[0];
 };
 
+static inline void
+__mem_cgroup_stat_reset_safe(struct mem_cgroup_stat_cpu *stat,
+                               enum mem_cgroup_stat_index idx)
+{
+       stat->count[idx] = 0;
+}
+
+static inline s64
+__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat,
+                               enum mem_cgroup_stat_index idx)
+{
+       return stat->count[idx];
+}
+
 /*
  * For accounting under irq disable, no need for increment preempt count.
  */
@@ -118,6 +136,12 @@ struct mem_cgroup_per_zone {
        unsigned long           count[NR_LRU_LISTS];
 
        struct zone_reclaim_stat reclaim_stat;
+       struct rb_node          tree_node;      /* RB tree node */
+       unsigned long long      usage_in_excess;/* Set to the value by which */
+                                               /* the soft limit is exceeded */
+       bool                    on_tree;
+       struct mem_cgroup       *mem;           /* Back pointer, we cannot */
+                                               /* use container_of        */
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)      ((mz)->count[(idx)])
@@ -131,6 +155,26 @@ struct mem_cgroup_lru_info {
 };
 
 /*
+ * Cgroups above their limits are maintained in an RB-Tree, independent of
+ * their hierarchy representation
+ */
+
+struct mem_cgroup_tree_per_zone {
+       struct rb_root rb_root;
+       spinlock_t lock;
+};
+
+struct mem_cgroup_tree_per_node {
+       struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_tree {
+       struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
+};
+
+static struct mem_cgroup_tree soft_limit_tree __read_mostly;
+
+/*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
@@ -165,7 +209,7 @@ struct mem_cgroup {
        int     prev_priority;  /* for recording reclaim priority */
 
        /*
-        * While reclaiming in a hiearchy, we cache the last child we
+        * While reclaiming in a hierarchy, we cache the last child we
         * reclaimed from.
         */
        int last_scanned_child;
@@ -187,6 +231,13 @@ struct mem_cgroup {
        struct mem_cgroup_stat stat;
 };
 
+/*
+ * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
+ * limit reclaim to prevent infinite loops, should they ever occur.
+ */
+#define        MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
+#define        MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
+
 enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
@@ -211,15 +262,237 @@ enum charge_type {
 #define MEMFILE_TYPE(val)      (((val) >> 16) & 0xffff)
 #define MEMFILE_ATTR(val)      ((val) & 0xffff)
 
+/*
+ * Reclaim flags for mem_cgroup_hierarchical_reclaim
+ */
+#define MEM_CGROUP_RECLAIM_NOSWAP_BIT  0x0
+#define MEM_CGROUP_RECLAIM_NOSWAP      (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
+#define MEM_CGROUP_RECLAIM_SHRINK_BIT  0x1
+#define MEM_CGROUP_RECLAIM_SHRINK      (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
+#define MEM_CGROUP_RECLAIM_SOFT_BIT    0x2
+#define MEM_CGROUP_RECLAIM_SOFT                (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
+
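These reclaim flags form a bitmask and can be combined by callers; for
example, the memsw limit-resize path later in this patch requests a
no-swap shrink with:

	mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
					MEM_CGROUP_RECLAIM_NOSWAP |
					MEM_CGROUP_RECLAIM_SHRINK);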
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
 
+static struct mem_cgroup_per_zone *
+mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
+{
+       return &mem->info.nodeinfo[nid]->zoneinfo[zid];
+}
+
+static struct mem_cgroup_per_zone *
+page_cgroup_zoneinfo(struct page_cgroup *pc)
+{
+       struct mem_cgroup *mem = pc->mem_cgroup;
+       int nid = page_cgroup_nid(pc);
+       int zid = page_cgroup_zid(pc);
+
+       if (!mem)
+               return NULL;
+
+       return mem_cgroup_zoneinfo(mem, nid, zid);
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_node_zone(int nid, int zid)
+{
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_from_page(struct page *page)
+{
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static void
+__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz,
+                               unsigned long long new_usage_in_excess)
+{
+       struct rb_node **p = &mctz->rb_root.rb_node;
+       struct rb_node *parent = NULL;
+       struct mem_cgroup_per_zone *mz_node;
+
+       if (mz->on_tree)
+               return;
+
+       mz->usage_in_excess = new_usage_in_excess;
+       if (!mz->usage_in_excess)
+               return;
+       while (*p) {
+               parent = *p;
+               mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
+                                       tree_node);
+               if (mz->usage_in_excess < mz_node->usage_in_excess)
+                       p = &(*p)->rb_left;
+               /*
+                * We can't avoid mem cgroups that are over their soft
+                * limit by the same amount; ties go to the right.
+                */
+               else if (mz->usage_in_excess >= mz_node->usage_in_excess)
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&mz->tree_node, parent, p);
+       rb_insert_color(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = true;
+}
+
+static void
+__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       if (!mz->on_tree)
+               return;
+       rb_erase(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = false;
+}
+
+static void
+mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       spin_lock(&mctz->lock);
+       __mem_cgroup_remove_exceeded(mem, mz, mctz);
+       spin_unlock(&mctz->lock);
+}
+
+static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
+{
+       bool ret = false;
+       int cpu;
+       s64 val;
+       struct mem_cgroup_stat_cpu *cpustat;
+
+       cpu = get_cpu();
+       cpustat = &mem->stat.cpustat[cpu];
+       val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS);
+       if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) {
+               __mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS);
+               ret = true;
+       }
+       put_cpu();
+       return ret;
+}
+
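mem_cgroup_soft_limit_check() above is a cheap per-CPU ratelimit: each
charge/uncharge bumps MEM_CGROUP_STAT_EVENTS, and only once roughly
SOFTLIMIT_EVENTS_THRESH events have accumulated on a CPU does the more
expensive RB-tree update run. A minimal stand-alone sketch of the same
pattern (all names here are hypothetical, not part of the patch):

	#include <stdbool.h>

	#define EVENTS_THRESH 1000	/* stand-in for SOFTLIMIT_EVENTS_THRESH */

	struct event_counter {		/* one (conceptually per-CPU) slot */
		long events;
	};

	/* Bump the counter on every event; returns true roughly once per
	 * EVENTS_THRESH events, at which point the caller does the
	 * expensive work (here, the soft-limit tree update). */
	static bool event_threshold_hit(struct event_counter *c)
	{
		if (++c->events <= EVENTS_THRESH)
			return false;
		c->events = 0;
		return true;
	}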
+static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
+{
+       unsigned long long excess;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+       mctz = soft_limit_tree_from_page(page);
+
+       /*
+        * Necessary to update all ancestors when hierarchy is used,
+        * because their event counter is not touched.
+        */
+       for (; mem; mem = parent_mem_cgroup(mem)) {
+               mz = mem_cgroup_zoneinfo(mem, nid, zid);
+               excess = res_counter_soft_limit_excess(&mem->res);
+               /*
+                * We have to update the tree if mz is on the RB-tree or
+                * mem is over its soft limit.
+                */
+               if (excess || mz->on_tree) {
+                       spin_lock(&mctz->lock);
+                       /* if on-tree, remove it */
+                       if (mz->on_tree)
+                               __mem_cgroup_remove_exceeded(mem, mz, mctz);
+                       /*
+                        * Insert again. mz->usage_in_excess will be updated.
+                        * If excess is 0, no tree ops.
+                        */
+                       __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
+                       spin_unlock(&mctz->lock);
+               }
+       }
+}
+
+static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
+{
+       int node, zone;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+
+       for_each_node_state(node, N_POSSIBLE) {
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       mz = mem_cgroup_zoneinfo(mem, node, zone);
+                       mctz = soft_limit_tree_node_zone(node, zone);
+                       mem_cgroup_remove_exceeded(mem, mz, mctz);
+               }
+       }
+}
+
+static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
+{
+       return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
+}
+
+static struct mem_cgroup_per_zone *
+__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct rb_node *rightmost = NULL;
+       struct mem_cgroup_per_zone *mz;
+
+retry:
+       mz = NULL;
+       rightmost = rb_last(&mctz->rb_root);
+       if (!rightmost)
+               goto done;              /* Nothing to reclaim from */
+
+       mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
+       /*
+        * Remove the node now but someone else can add it back;
+        * we will add it back at the end of reclaim to its correct
+        * position in the tree.
+        */
+       __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+       if (!res_counter_soft_limit_excess(&mz->mem->res) ||
+               !css_tryget(&mz->mem->css))
+               goto retry;
+done:
+       return mz;
+}
+
+static struct mem_cgroup_per_zone *
+mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct mem_cgroup_per_zone *mz;
+
+       spin_lock(&mctz->lock);
+       mz = __mem_cgroup_largest_soft_limit_node(mctz);
+       spin_unlock(&mctz->lock);
+       return mz;
+}
+
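Taken together, __mem_cgroup_insert_exceeded() and
__mem_cgroup_largest_soft_limit_node() implement max-extraction: entries
are ordered by usage_in_excess, ties go right, so rb_last() always yields
a cgroup with the largest excess. A self-contained user-space analogue of
the idiom, with a plain BST standing in for the kernel rbtree (all names
hypothetical):

	#include <stdio.h>

	struct node {
		unsigned long long excess;
		struct node *left, *right;
	};

	static struct node *insert(struct node *root, struct node *n)
	{
		if (!root)
			return n;
		if (n->excess < root->excess)
			root->left = insert(root->left, n);
		else	/* equal keys go right, as in the patch */
			root->right = insert(root->right, n);
		return root;
	}

	static struct node *rightmost(struct node *root)
	{
		while (root && root->right)
			root = root->right;
		return root;	/* largest excess: next reclaim victim */
	}

	int main(void)
	{
		struct node n[3] = { { 300 }, { 100 }, { 300 } };
		struct node *root = NULL;
		int i;

		for (i = 0; i < 3; i++)
			root = insert(root, &n[i]);
		printf("victim excess: %llu\n", rightmost(root)->excess);
		return 0;
	}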
+static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
+                                        bool charge)
+{
+       int val = (charge) ? 1 : -1;
+       struct mem_cgroup_stat *stat = &mem->stat;
+       struct mem_cgroup_stat_cpu *cpustat;
+       int cpu = get_cpu();
+
+       cpustat = &stat->cpustat[cpu];
+       __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SWAPOUT, val);
+       put_cpu();
+}
+
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
                                         struct page_cgroup *pc,
                                         bool charge)
 {
-       int val = (charge)? 1 : -1;
+       int val = (charge) ? 1 : -1;
        struct mem_cgroup_stat *stat = &mem->stat;
        struct mem_cgroup_stat_cpu *cpustat;
        int cpu = get_cpu();
@@ -236,28 +509,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
        else
                __mem_cgroup_stat_add_safe(cpustat,
                                MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
+       __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_EVENTS, 1);
        put_cpu();
 }
 
-static struct mem_cgroup_per_zone *
-mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
-{
-       return &mem->info.nodeinfo[nid]->zoneinfo[zid];
-}
-
-static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct page_cgroup *pc)
-{
-       struct mem_cgroup *mem = pc->mem_cgroup;
-       int nid = page_cgroup_nid(pc);
-       int zid = page_cgroup_zid(pc);
-
-       if (!mem)
-               return NULL;
-
-       return mem_cgroup_zoneinfo(mem, nid, zid);
-}
-
 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
                                        enum lru_list idx)
 {
@@ -862,28 +1117,62 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
  * If shrink==true, for avoiding to free too much, this returns immedieately.
  */
 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
-                                  gfp_t gfp_mask, bool noswap, bool shrink)
+                                               struct zone *zone,
+                                               gfp_t gfp_mask,
+                                               unsigned long reclaim_options)
 {
        struct mem_cgroup *victim;
        int ret, total = 0;
        int loop = 0;
+       bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
+       bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
+       bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
+       unsigned long excess = mem_cgroup_get_excess(root_mem);
 
        /* If memsw_is_minimum==1, swap-out is of-no-use. */
        if (root_mem->memsw_is_minimum)
                noswap = true;
 
-       while (loop < 2) {
+       while (1) {
                victim = mem_cgroup_select_victim(root_mem);
-               if (victim == root_mem)
+               if (victim == root_mem) {
                        loop++;
+                       if (loop >= 2) {
+                               /*
+                                * If we have not been able to reclaim
+                                * anything, it might be because there are
+                                * no reclaimable pages under this hierarchy.
+                                */
+                               if (!check_soft || !total) {
+                                       css_put(&victim->css);
+                                       break;
+                               }
+                               /*
+                                * We want to do more targeted reclaim.
+                                * excess >> 2 is not so large that we reclaim
+                                * too much, nor so small that we keep coming
+                                * back to reclaim from this cgroup.
+                                */
+                               if (total >= (excess >> 2) ||
+                                       (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
+                                       css_put(&victim->css);
+                                       break;
+                               }
+                       }
+               }
                if (!mem_cgroup_local_usage(&victim->stat)) {
                        /* this cgroup's local usage == 0 */
                        css_put(&victim->css);
                        continue;
                }
                /* we use swappiness of local cgroup */
-               ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
-                                                  get_swappiness(victim));
+               if (check_soft)
+                       ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
+                               noswap, get_swappiness(victim), zone,
+                               zone->zone_pgdat->node_id);
+               else
+                       ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
+                                               noswap, get_swappiness(victim));
                css_put(&victim->css);
                /*
                 * At shrinking usage, we can't check we should stop here or
@@ -893,7 +1182,10 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                if (shrink)
                        return ret;
                total += ret;
-               if (mem_cgroup_check_under_limit(root_mem))
+               if (check_soft) {
+                       if (res_counter_check_under_soft_limit(&root_mem->res))
+                               return total;
+               } else if (mem_cgroup_check_under_limit(root_mem))
                        return 1 + total;
        }
        return total;
@@ -972,7 +1264,7 @@ done:
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        gfp_t gfp_mask, struct mem_cgroup **memcg,
-                       bool oom)
+                       bool oom, struct page *page)
 {
        struct mem_cgroup *mem, *mem_over_limit;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
@@ -1003,9 +1295,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
        VM_BUG_ON(css_is_removed(&mem->css));
 
        while (1) {
-               int ret;
-               bool noswap = false;
+               int ret = 0;
+               unsigned long flags = 0;
 
+               if (mem_cgroup_is_root(mem))
+                       goto done;
                ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
                if (likely(!ret)) {
                        if (!do_swap_account)
@@ -1016,7 +1310,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                                break;
                        /* mem+swap counter fails */
                        res_counter_uncharge(&mem->res, PAGE_SIZE);
-                       noswap = true;
+                       flags |= MEM_CGROUP_RECLAIM_NOSWAP;
                        mem_over_limit = mem_cgroup_from_res_counter(fail_res,
                                                                        memsw);
                } else
@@ -1027,8 +1321,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                if (!(gfp_mask & __GFP_WAIT))
                        goto nomem;
 
-               ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
-                                                       noswap, false);
+               ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
+                                               gfp_mask, flags);
                if (ret)
                        continue;
 
@@ -1053,13 +1347,19 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        goto nomem;
                }
        }
+       /*
+        * Insert ancestors (and the ancestors' ancestors) into the
+        * softlimit RB-tree, if they exceed their softlimit.
+        */
+       if (mem_cgroup_soft_limit_check(mem))
+               mem_cgroup_update_tree(mem, page);
+done:
        return 0;
 nomem:
        css_put(&mem->css);
        return -ENOMEM;
 }
 
-
 /*
  * A helper function to get mem_cgroup from ID. must be called under
  * rcu_read_lock(). The caller must check css_is_removed() or some if
@@ -1126,9 +1426,11 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
        lock_page_cgroup(pc);
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
-               res_counter_uncharge(&mem->res, PAGE_SIZE);
-               if (do_swap_account)
-                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+               if (!mem_cgroup_is_root(mem)) {
+                       res_counter_uncharge(&mem->res, PAGE_SIZE);
+                       if (do_swap_account)
+                               res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+               }
                css_put(&mem->css);
                return;
        }
@@ -1205,7 +1507,8 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
        if (pc->mem_cgroup != from)
                goto out;
 
-       res_counter_uncharge(&from->res, PAGE_SIZE);
+       if (!mem_cgroup_is_root(from))
+               res_counter_uncharge(&from->res, PAGE_SIZE);
        mem_cgroup_charge_statistics(from, pc, false);
 
        page = pc->page;
@@ -1224,7 +1527,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
                                                1);
        }
 
-       if (do_swap_account)
+       if (do_swap_account && !mem_cgroup_is_root(from))
                res_counter_uncharge(&from->memsw, PAGE_SIZE);
        css_put(&from->css);
 
@@ -1265,7 +1568,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
        parent = mem_cgroup_from_cont(pcg);
 
 
-       ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+       ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
        if (ret || !parent)
                return ret;
 
@@ -1295,9 +1598,11 @@ uncharge:
        /* drop extra refcnt by try_charge() */
        css_put(&parent->css);
        /* uncharge if move fails */
-       res_counter_uncharge(&parent->res, PAGE_SIZE);
-       if (do_swap_account)
-               res_counter_uncharge(&parent->memsw, PAGE_SIZE);
+       if (!mem_cgroup_is_root(parent)) {
+               res_counter_uncharge(&parent->res, PAGE_SIZE);
+               if (do_swap_account)
+                       res_counter_uncharge(&parent->memsw, PAGE_SIZE);
+       }
        return ret;
 }
 
@@ -1322,7 +1627,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        prefetchw(pc);
 
        mem = memcg;
-       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
        if (ret || !mem)
                return ret;
 
@@ -1415,7 +1720,7 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 /*
  * While swap-in, try_charge -> commit or cancel, the page is locked.
  * And when try_charge() successfully returns, one refcnt to memcg without
- * struct page_cgroup is aquired. This refcnt will be cumsumed by
+ * struct page_cgroup is acquired. This refcnt will be consumed by
  * "commit()" or removed by "cancel()"
  */
 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
@@ -1432,23 +1737,24 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                goto charge_cur_mm;
        /*
         * A racing thread's fault, or swapoff, may have already updated
-        * the pte, and even removed page from swap cache: return success
-        * to go on to do_swap_page()'s pte_same() test, which should fail.
+        * the pte, and even removed page from swap cache: in those cases
+        * do_swap_page()'s pte_same() test will fail; but there's also a
+        * KSM case which does need to charge the page.
         */
        if (!PageSwapCache(page))
-               return 0;
+               goto charge_cur_mm;
        mem = try_get_mem_cgroup_from_swapcache(page);
        if (!mem)
                goto charge_cur_mm;
        *ptr = mem;
-       ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+       ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
        /* drop extra refcnt from tryget */
        css_put(&mem->css);
        return ret;
 charge_cur_mm:
        if (unlikely(!mm))
                mm = &init_mm;
-       return __mem_cgroup_try_charge(mm, mask, ptr, true);
+       return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
 }
 
 static void
@@ -1486,7 +1792,9 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                         * This recorded memcg can be obsolete one. So, avoid
                         * calling css_tryget
                         */
-                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+                       if (!mem_cgroup_is_root(memcg))
+                               res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+                       mem_cgroup_swap_statistics(memcg, false);
                        mem_cgroup_put(memcg);
                }
                rcu_read_unlock();
@@ -1511,9 +1819,11 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
                return;
        if (!mem)
                return;
-       res_counter_uncharge(&mem->res, PAGE_SIZE);
-       if (do_swap_account)
-               res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+       if (!mem_cgroup_is_root(mem)) {
+               res_counter_uncharge(&mem->res, PAGE_SIZE);
+               if (do_swap_account)
+                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+       }
        css_put(&mem->css);
 }
 
@@ -1565,9 +1875,14 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
                break;
        }
 
-       res_counter_uncharge(&mem->res, PAGE_SIZE);
-       if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
-               res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+       if (!mem_cgroup_is_root(mem)) {
+               res_counter_uncharge(&mem->res, PAGE_SIZE);
+               if (do_swap_account &&
+                               (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
+                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+       }
+       if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+               mem_cgroup_swap_statistics(mem, true);
        mem_cgroup_charge_statistics(mem, pc, false);
 
        ClearPageCgroupUsed(pc);
@@ -1581,6 +1896,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        mz = page_cgroup_zoneinfo(pc);
        unlock_page_cgroup(pc);
 
+       if (mem_cgroup_soft_limit_check(mem))
+               mem_cgroup_update_tree(mem, page);
        /* at swapout, this memcg will be accessed to record to swap */
        if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
                css_put(&mem->css);
@@ -1656,7 +1973,9 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
                 * We uncharge this because swap is freed.
                 * This memcg can be obsolete one. We avoid calling css_tryget
                 */
-               res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+               if (!mem_cgroup_is_root(memcg))
+                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+               mem_cgroup_swap_statistics(memcg, false);
                mem_cgroup_put(memcg);
        }
        rcu_read_unlock();
@@ -1685,7 +2004,8 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
        unlock_page_cgroup(pc);
 
        if (mem) {
-               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+                                               page);
                css_put(&mem->css);
        }
        *ptr = mem;
@@ -1825,8 +2145,9 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                if (!ret)
                        break;
 
-               progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
-                                                  false, true);
+               progress = mem_cgroup_hierarchical_reclaim(memcg, NULL,
+                                               GFP_KERNEL,
+                                               MEM_CGROUP_RECLAIM_SHRINK);
                curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
                /* Usage is reduced ? */
                if (curusage >= oldusage)
@@ -1878,7 +2199,9 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                if (!ret)
                        break;
 
-               mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
+               mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
+                                               MEM_CGROUP_RECLAIM_NOSWAP |
+                                               MEM_CGROUP_RECLAIM_SHRINK);
                curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
                /* Usage is reduced ? */
                if (curusage >= oldusage)
@@ -1889,6 +2212,97 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
        return ret;
 }
 
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                               gfp_t gfp_mask, int nid,
+                                               int zid)
+{
+       unsigned long nr_reclaimed = 0;
+       struct mem_cgroup_per_zone *mz, *next_mz = NULL;
+       unsigned long reclaimed;
+       int loop = 0;
+       struct mem_cgroup_tree_per_zone *mctz;
+       unsigned long long excess;
+
+       if (order > 0)
+               return 0;
+
+       mctz = soft_limit_tree_node_zone(nid, zid);
+       /*
+        * This loop can run for a while, especially if mem_cgroups
+        * continuously keep exceeding their soft limit and putting the
+        * system under pressure.
+        */
+       do {
+               if (next_mz)
+                       mz = next_mz;
+               else
+                       mz = mem_cgroup_largest_soft_limit_node(mctz);
+               if (!mz)
+                       break;
+
+               reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
+                                               gfp_mask,
+                                               MEM_CGROUP_RECLAIM_SOFT);
+               nr_reclaimed += reclaimed;
+               spin_lock(&mctz->lock);
+
+               /*
+                * If we failed to reclaim anything from this memory cgroup,
+                * it is time to move on to the next cgroup.
+                */
+               next_mz = NULL;
+               if (!reclaimed) {
+                       do {
+                               /*
+                                * Loop until we find yet another one.
+                                *
+                                * By the time we get the soft_limit lock
+                                * again, someone might have added the
+                                * group back on the RB tree. Iterate to
+                                * make sure we get a different mem.
+                                * mem_cgroup_largest_soft_limit_node returns
+                                * NULL if no other cgroup is present on
+                                * the tree
+                                */
+                               next_mz =
+                               __mem_cgroup_largest_soft_limit_node(mctz);
+                               if (next_mz == mz) {
+                                       css_put(&next_mz->mem->css);
+                                       next_mz = NULL;
+                               } else /* next_mz == NULL or other memcg */
+                                       break;
+                       } while (1);
+               }
+               __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+               excess = res_counter_soft_limit_excess(&mz->mem->res);
+               /*
+                * One school of thought says that we should not add
+                * back the node to the tree if reclaim returns 0.
+                * But our reclaim could return 0 simply because, due
+                * to priority, we are exposing a smaller subset of
+                * memory to reclaim from. Consider this as a longer
+                * term TODO.
+                */
+               /* If excess == 0, no tree ops */
+               __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
+               spin_unlock(&mctz->lock);
+               css_put(&mz->mem->css);
+               loop++;
+               /*
+                * Could not reclaim anything and there are no more
+                * mem cgroups to try or we seem to be looping without
+                * reclaiming anything.
+                */
+               if (!nr_reclaimed &&
+                       (next_mz == NULL ||
+                       loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
+                       break;
+       } while (!nr_reclaimed);
+       if (next_mz)
+               css_put(&next_mz->mem->css);
+       return nr_reclaimed;
+}
+
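mem_cgroup_soft_limit_reclaim() is intended to be driven from global
reclaim in mm/vmscan.c, which is outside this file's diff. A sketch of
the call shape, under the assumption (not a quote of the series) that
kswapd invokes it per zone before falling back to ordinary zone reclaim;
soft_limit_pass() is a hypothetical wrapper, while zone_to_nid() and
zone_idx() are existing kernel helpers:

	static unsigned long soft_limit_pass(struct zone *zone, int order,
					     gfp_t gfp_mask)
	{
		/* order > 0 returns 0 immediately, see above */
		return mem_cgroup_soft_limit_reclaim(zone, order, gfp_mask,
						     zone_to_nid(zone),
						     zone_idx(zone));
	}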
 /*
  * This routine traverse page_cgroup in given list and drop them all.
  * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
@@ -2053,7 +2467,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
 
        cgroup_lock();
        /*
-        * If parent's use_hiearchy is set, we can't make any modifications
+        * If parent's use_hierarchy is set, we can't make any modifications
         * in the child subtrees. If it is unset, then the change can
         * occur, provided the current cgroup has no children.
         *
@@ -2073,20 +2487,65 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
        return retval;
 }
 
+struct mem_cgroup_idx_data {
+       s64 val;
+       enum mem_cgroup_stat_index idx;
+};
+
+static int
+mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
+{
+       struct mem_cgroup_idx_data *d = data;
+       d->val += mem_cgroup_read_stat(&mem->stat, d->idx);
+       return 0;
+}
+
+static void
+mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
+                               enum mem_cgroup_stat_index idx, s64 *val)
+{
+       struct mem_cgroup_idx_data d;
+       d.idx = idx;
+       d.val = 0;
+       mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
+       *val = d.val;
+}
+
 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 {
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-       u64 val = 0;
+       u64 idx_val, val;
        int type, name;
 
        type = MEMFILE_TYPE(cft->private);
        name = MEMFILE_ATTR(cft->private);
        switch (type) {
        case _MEM:
-               val = res_counter_read_u64(&mem->res, name);
+               if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
+                       mem_cgroup_get_recursive_idx_stat(mem,
+                               MEM_CGROUP_STAT_CACHE, &idx_val);
+                       val = idx_val;
+                       mem_cgroup_get_recursive_idx_stat(mem,
+                               MEM_CGROUP_STAT_RSS, &idx_val);
+                       val += idx_val;
+                       val <<= PAGE_SHIFT;
+               } else
+                       val = res_counter_read_u64(&mem->res, name);
                break;
        case _MEMSWAP:
-               val = res_counter_read_u64(&mem->memsw, name);
+               if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
+                       mem_cgroup_get_recursive_idx_stat(mem,
+                               MEM_CGROUP_STAT_CACHE, &idx_val);
+                       val = idx_val;
+                       mem_cgroup_get_recursive_idx_stat(mem,
+                               MEM_CGROUP_STAT_RSS, &idx_val);
+                       val += idx_val;
+                       mem_cgroup_get_recursive_idx_stat(mem,
+                               MEM_CGROUP_STAT_SWAPOUT, &idx_val);
+                       val += idx_val;
+                       val <<= PAGE_SHIFT;
+               } else
+                       val = res_counter_read_u64(&mem->memsw, name);
                break;
        default:
                BUG();
@@ -2194,6 +2652,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
                        res_counter_reset_failcnt(&mem->memsw);
                break;
        }
+
        return 0;
 }
 
@@ -2205,6 +2664,7 @@ enum {
        MCS_MAPPED_FILE,
        MCS_PGPGIN,
        MCS_PGPGOUT,
+       MCS_SWAP,
        MCS_INACTIVE_ANON,
        MCS_ACTIVE_ANON,
        MCS_INACTIVE_FILE,
@@ -2226,6 +2686,7 @@ struct {
        {"mapped_file", "total_mapped_file"},
        {"pgpgin", "total_pgpgin"},
        {"pgpgout", "total_pgpgout"},
+       {"swap", "total_swap"},
        {"inactive_anon", "total_inactive_anon"},
        {"active_anon", "total_active_anon"},
        {"inactive_file", "total_inactive_file"},
@@ -2250,6 +2711,10 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
        s->stat[MCS_PGPGIN] += val;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
        s->stat[MCS_PGPGOUT] += val;
+       if (do_swap_account) {
+               val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_SWAPOUT);
+               s->stat[MCS_SWAP] += val * PAGE_SIZE;
+       }
 
        /* per zone stat */
        val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
@@ -2281,8 +2746,11 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
        memset(&mystat, 0, sizeof(mystat));
        mem_cgroup_get_local_stat(mem_cont, &mystat);
 
-       for (i = 0; i < NR_MCS_STAT; i++)
+       for (i = 0; i < NR_MCS_STAT; i++) {
+               if (i == MCS_SWAP && !do_swap_account)
+                       continue;
                cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
+       }
 
        /* Hierarchical information */
        {
@@ -2295,9 +2763,11 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 
        memset(&mystat, 0, sizeof(mystat));
        mem_cgroup_get_total_stat(mem_cont, &mystat);
-       for (i = 0; i < NR_MCS_STAT; i++)
+       for (i = 0; i < NR_MCS_STAT; i++) {
+               if (i == MCS_SWAP && !do_swap_account)
+                       continue;
                cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
-
+       }
 
 #ifdef CONFIG_DEBUG_VM
        cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
@@ -2489,6 +2959,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
                mz = &pn->zoneinfo[zone];
                for_each_lru(l)
                        INIT_LIST_HEAD(&mz->lists[l]);
+               mz->usage_in_excess = 0;
+               mz->on_tree = false;
+               mz->mem = mem;
        }
        return 0;
 }
@@ -2534,6 +3007,7 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
 {
        int node;
 
+       mem_cgroup_remove_from_trees(mem);
        free_css_id(&mem_cgroup_subsys, &mem->css);
 
        for_each_node_state(node, N_POSSIBLE)
@@ -2582,6 +3056,31 @@ static void __init enable_swap_cgroup(void)
 }
 #endif
 
+static int mem_cgroup_soft_limit_tree_init(void)
+{
+       struct mem_cgroup_tree_per_node *rtpn;
+       struct mem_cgroup_tree_per_zone *rtpz;
+       int tmp, node, zone;
+
+       for_each_node_state(node, N_POSSIBLE) {
+               tmp = node;
+               if (!node_state(node, N_NORMAL_MEMORY))
+                       tmp = -1;
+               rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
+               if (!rtpn)
+                       return 1;
+
+               soft_limit_tree.rb_tree_per_node[node] = rtpn;
+
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       rtpz = &rtpn->rb_tree_per_zone[zone];
+                       rtpz->rb_root = RB_ROOT;
+                       spin_lock_init(&rtpz->lock);
+               }
+       }
+       return 0;
+}
+
 static struct cgroup_subsys_state * __ref
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -2596,11 +3095,15 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;
+
        /* root ? */
        if (cont->parent == NULL) {
                enable_swap_cgroup();
                parent = NULL;
                root_mem_cgroup = mem;
+               if (mem_cgroup_soft_limit_tree_init())
+                       goto free_out;
+
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
                mem->use_hierarchy = parent->use_hierarchy;