diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dcbe30a..866dcc7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
 #include <linux/backing-dev.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/mm_inline.h>
+#include <linux/page_cgroup.h>
 
 #include <asm/uaccess.h>
 
-struct cgroup_subsys mem_cgroup_subsys;
-static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
+struct cgroup_subsys mem_cgroup_subsys __read_mostly;
+#define MEM_CGROUP_RECLAIM_RETRIES     5
 
 /*
  * Statistics for memory cgroup.
@@ -45,6 +49,8 @@ enum mem_cgroup_stat_index {
         */
        MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
+       MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
+       MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
 
        MEM_CGROUP_STAT_NSTATS,
 };
@@ -60,11 +66,10 @@ struct mem_cgroup_stat {
 /*
  * For accounting under irq disable, no need for increment preempt count.
  */
-static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
+static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
                enum mem_cgroup_stat_index idx, int val)
 {
-       int cpu = smp_processor_id();
-       stat->cpustat[cpu].count[idx] += val;
+       stat->count[idx] += val;
 }
 
 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
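
The statistics are now kept in per-CPU slots: __mem_cgroup_stat_add_safe() above bumps plain counters in one mem_cgroup_stat_cpu, and mem_cgroup_charge_statistics() (further down) selects the current CPU's slot with smp_processor_id() while interrupts are disabled, so the CPU cannot change underneath it and no preempt_disable() is needed. A stand-alone userspace sketch of the shape, with the CPU id passed in explicitly since userspace has no smp_processor_id():

/* Userspace sketch of per-CPU statistics slots; NR_CPUS and the index enum
 * are illustrative, and "cpu" stands in for smp_processor_id(). */
#include <stdio.h>

#define NR_CPUS 4

enum stat_index { STAT_CACHE, STAT_RSS, STAT_PGPGIN, STAT_PGPGOUT, NR_STATS };

struct stat_cpu { long count[NR_STATS]; };

struct stats { struct stat_cpu cpustat[NR_CPUS]; };

/* no locking: each CPU only ever touches its own slot */
static void stat_add(struct stat_cpu *cpustat, enum stat_index idx, long val)
{
	cpustat->count[idx] += val;
}

static long stat_read(struct stats *s, enum stat_index idx)
{
	long total = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* reader sums all CPU slots */
		total += s->cpustat[cpu].count[idx];
	return total;
}

int main(void)
{
	static struct stats s;			/* zero-initialized */
	int cpu = 1;				/* ~ smp_processor_id() */

	stat_add(&s.cpustat[cpu], STAT_RSS, 1);
	stat_add(&s.cpustat[cpu], STAT_PGPGIN, 1);
	printf("rss=%ld pgpgin=%ld\n",
	       stat_read(&s, STAT_RSS), stat_read(&s, STAT_PGPGIN));
	return 0;
}
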
@@ -80,22 +85,13 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
 /*
  * per-zone information in memory controller.
  */
-
-enum mem_cgroup_zstat_index {
-       MEM_CGROUP_ZSTAT_ACTIVE,
-       MEM_CGROUP_ZSTAT_INACTIVE,
-
-       NR_MEM_CGROUP_ZSTAT,
-};
-
 struct mem_cgroup_per_zone {
        /*
         * spin_lock to protect the per cgroup LRU
         */
        spinlock_t              lru_lock;
-       struct list_head        active_list;
-       struct list_head        inactive_list;
-       unsigned long count[NR_MEM_CGROUP_ZSTAT];
+       struct list_head        lists[NR_LRU_LISTS];
+       unsigned long           count[NR_LRU_LISTS];
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)      ((mz)->count[(idx)])
@@ -139,63 +135,53 @@ struct mem_cgroup {
 };
 static struct mem_cgroup init_mem_cgroup;
 
-/*
- * We use the lower bit of the page->page_cgroup pointer as a bit spin
- * lock.  We need to ensure that page->page_cgroup is at least two
- * byte aligned (based on comments from Nick Piggin).  But since
- * bit_spin_lock doesn't actually set that lock bit in a non-debug
- * uniprocessor kernel, we should avoid setting it here too.
- */
-#define PAGE_CGROUP_LOCK_BIT   0x0
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define PAGE_CGROUP_LOCK       (1 << PAGE_CGROUP_LOCK_BIT)
-#else
-#define PAGE_CGROUP_LOCK       0x0
-#endif
-
-/*
- * A page_cgroup page is associated with every page descriptor. The
- * page_cgroup helps us identify information about the cgroup
- */
-struct page_cgroup {
-       struct list_head lru;           /* per cgroup LRU list */
-       struct page *page;
-       struct mem_cgroup *mem_cgroup;
-       int ref_cnt;                    /* cached, mapped, migrating */
-       int flags;
-};
-#define PAGE_CGROUP_FLAG_CACHE (0x1)   /* charged as cache */
-#define PAGE_CGROUP_FLAG_ACTIVE (0x2)  /* page is active in this cgroup */
-
-static int page_cgroup_nid(struct page_cgroup *pc)
-{
-       return page_to_nid(pc->page);
-}
-
-static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
-{
-       return page_zonenum(pc->page);
-}
-
 enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
+       MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
+       MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
+       NR_CHARGE_TYPE,
+};
+
+/* shorthands used only in this file, for readability */
+#define PCGF_CACHE     (1UL << PCG_CACHE)
+#define PCGF_USED      (1UL << PCG_USED)
+#define PCGF_ACTIVE    (1UL << PCG_ACTIVE)
+#define PCGF_LOCK      (1UL << PCG_LOCK)
+#define PCGF_FILE      (1UL << PCG_FILE)
+static const unsigned long
+pcg_default_flags[NR_CHARGE_TYPE] = {
+       PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */
+       PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */
+       PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
+       0, /* FORCE */
 };
 
 /*
  * Always modified under lru lock. Then, not necessary to preempt_disable()
  */
-static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
-                                       bool charge)
+static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
+                                        struct page_cgroup *pc,
+                                        bool charge)
 {
        int val = (charge)? 1 : -1;
        struct mem_cgroup_stat *stat = &mem->stat;
+       struct mem_cgroup_stat_cpu *cpustat;
 
        VM_BUG_ON(!irqs_disabled());
-       if (flags & PAGE_CGROUP_FLAG_CACHE)
-               __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
+
+       cpustat = &stat->cpustat[smp_processor_id()];
+       if (PageCgroupCache(pc))
+               __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
        else
-               __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
+               __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
+
+       if (charge)
+               __mem_cgroup_stat_add_safe(cpustat,
+                               MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
+       else
+               __mem_cgroup_stat_add_safe(cpustat,
+                               MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
 }
 
 static struct mem_cgroup_per_zone *
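
The PCGF_* masks and the pcg_default_flags[] table above pre-compose one flags word per charge type, which the charge path later installs in a single assignment (pc->flags = pcg_default_flags[ctype]). A rough stand-alone sketch of that scheme follows; the real bit numbers come from enum page_cgroup_flags in include/linux/page_cgroup.h (not shown in this diff), so the values below are illustrative stand-ins:

/* Illustrative userspace sketch; bit positions are invented stand-ins for
 * the PCG_* bits defined in include/linux/page_cgroup.h. */
#include <stdio.h>

enum { PCG_LOCK, PCG_CACHE, PCG_USED, PCG_ACTIVE, PCG_FILE };

#define PCGF_CACHE   (1UL << PCG_CACHE)
#define PCGF_USED    (1UL << PCG_USED)
#define PCGF_ACTIVE  (1UL << PCG_ACTIVE)
#define PCGF_LOCK    (1UL << PCG_LOCK)
#define PCGF_FILE    (1UL << PCG_FILE)

enum charge_type { CT_CACHE, CT_MAPPED, CT_SHMEM, CT_FORCE, NR_CT };

static const unsigned long default_flags[NR_CT] = {
	PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK,   /* file cache */
	PCGF_ACTIVE | PCGF_USED | PCGF_LOCK,              /* anon */
	PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* shmem */
	0,                                                /* force_empty */
};

int main(void)
{
	unsigned long flags = default_flags[CT_SHMEM];	/* "charge" a shmem page */

	printf("cache=%d active=%d file=%d used=%d\n",
	       !!(flags & PCGF_CACHE), !!(flags & PCGF_ACTIVE),
	       !!(flags & PCGF_FILE), !!(flags & PCGF_USED));
	return 0;
}
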
@@ -215,7 +201,7 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
 }
 
 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
-                                       enum mem_cgroup_zstat_index idx)
+                                       enum lru_list idx)
 {
        int nid, zid;
        struct mem_cgroup_per_zone *mz;
@@ -236,105 +222,91 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
                                css);
 }
 
-static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
+struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
+       /*
+        * mm_update_next_owner() may clear mm->owner to NULL
+        * if it races with swapoff, page migration, etc.
+        * So this can be called with p == NULL.
+        */
+       if (unlikely(!p))
+               return NULL;
+
        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
 }
 
-void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
+static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
+                       struct page_cgroup *pc)
 {
-       struct mem_cgroup *mem;
-
-       mem = mem_cgroup_from_task(p);
-       css_get(&mem->css);
-       mm->mem_cgroup = mem;
-}
-
-void mm_free_cgroup(struct mm_struct *mm)
-{
-       css_put(&mm->mem_cgroup->css);
-}
-
-static inline int page_cgroup_locked(struct page *page)
-{
-       return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
+       int lru = LRU_BASE;
+
+       if (PageCgroupUnevictable(pc))
+               lru = LRU_UNEVICTABLE;
+       else {
+               if (PageCgroupActive(pc))
+                       lru += LRU_ACTIVE;
+               if (PageCgroupFile(pc))
+                       lru += LRU_FILE;
+       }
 
-static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
-{
-       VM_BUG_ON(!page_cgroup_locked(page));
-       page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
-}
+       MEM_CGROUP_ZSTAT(mz, lru) -= 1;
 
-struct page_cgroup *page_get_page_cgroup(struct page *page)
-{
-       return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
+       mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
+       list_del(&pc->lru);
 }
 
-static void lock_page_cgroup(struct page *page)
+static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
+                               struct page_cgroup *pc)
 {
-       bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
+       int lru = LRU_BASE;
+
+       if (PageCgroupUnevictable(pc))
+               lru = LRU_UNEVICTABLE;
+       else {
+               if (PageCgroupActive(pc))
+                       lru += LRU_ACTIVE;
+               if (PageCgroupFile(pc))
+                       lru += LRU_FILE;
+       }
 
-static int try_lock_page_cgroup(struct page *page)
-{
-       return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-}
+       MEM_CGROUP_ZSTAT(mz, lru) += 1;
+       list_add(&pc->lru, &mz->lists[lru]);
 
-static void unlock_page_cgroup(struct page *page)
-{
-       bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
+       mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
 }
 
-static void __mem_cgroup_remove_list(struct page_cgroup *pc)
+static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
 {
-       int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
        struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
+       int active    = PageCgroupActive(pc);
+       int file      = PageCgroupFile(pc);
+       int unevictable = PageCgroupUnevictable(pc);
+       enum lru_list from = unevictable ? LRU_UNEVICTABLE :
+                               (LRU_FILE * !!file + !!active);
 
-       if (from)
-               MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
-       else
-               MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
-
-       mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
-       list_del_init(&pc->lru);
-}
-
-static void __mem_cgroup_add_list(struct page_cgroup *pc)
-{
-       int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-       struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
+       if (lru == from)
+               return;
 
-       if (!to) {
-               MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
-               list_add(&pc->lru, &mz->inactive_list);
+       MEM_CGROUP_ZSTAT(mz, from) -= 1;
+       /*
+        * Although this is done under mz->lru_lock, other flags that are
+        * not related to the LRU may be modified outside the lock, so we
+        * have to use the atomic set/clear flag helpers.
+        */
+       if (is_unevictable_lru(lru)) {
+               ClearPageCgroupActive(pc);
+               SetPageCgroupUnevictable(pc);
        } else {
-               MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
-               list_add(&pc->lru, &mz->active_list);
+               if (is_active_lru(lru))
+                       SetPageCgroupActive(pc);
+               else
+                       ClearPageCgroupActive(pc);
+               ClearPageCgroupUnevictable(pc);
        }
-       mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
-}
-
-static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
-{
-       int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-       struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
-
-       if (from)
-               MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
-       else
-               MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
 
-       if (active) {
-               MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
-               pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
-               list_move(&pc->lru, &mz->active_list);
-       } else {
-               MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
-               pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
-               list_move(&pc->lru, &mz->inactive_list);
-       }
+       MEM_CGROUP_ZSTAT(mz, lru) += 1;
+       list_move(&pc->lru, &mz->lists[lru]);
 }
 
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
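
The new list handling derives the target list index from the page_cgroup flag bits; __mem_cgroup_move_lists() above and mem_cgroup_isolate_pages() further down compute it as LRU_FILE * !!file + !!active, with unevictable pages short-circuited to their own list. A small stand-alone sketch of that mapping (the enum mirrors the layout of the kernel's enum lru_list of this era, re-declared here only for illustration):

/* Stand-alone sketch of the active/file/unevictable -> LRU index mapping. */
#include <stdio.h>

#define LRU_ACTIVE  1
#define LRU_FILE    2

enum lru_list {
	LRU_INACTIVE_ANON,	/* 0: !file, !active */
	LRU_ACTIVE_ANON,	/* 1: !file,  active */
	LRU_INACTIVE_FILE,	/* 2:  file, !active */
	LRU_ACTIVE_FILE,	/* 3:  file,  active */
	LRU_UNEVICTABLE,	/* 4 */
	NR_LRU_LISTS,
};

static int lru_index(int active, int file, int unevictable)
{
	if (unevictable)
		return LRU_UNEVICTABLE;
	return LRU_FILE * !!file + !!active;
}

int main(void)
{
	printf("anon/inactive -> %d\n", lru_index(0, 0, 0));
	printf("anon/active   -> %d\n", lru_index(1, 0, 0));
	printf("file/inactive -> %d\n", lru_index(0, 1, 0));
	printf("file/active   -> %d\n", lru_index(1, 1, 0));
	printf("unevictable   -> %d\n", lru_index(0, 0, 1));
	return 0;
}
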
@@ -350,13 +322,15 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
-void mem_cgroup_move_lists(struct page *page, bool active)
+void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
 {
        struct page_cgroup *pc;
-       struct mem_cgroup *mem;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;
 
+       if (mem_cgroup_subsys.disabled)
+               return;
+
        /*
         * We cannot lock_page_cgroup while holding zone's lru_lock,
         * because other holders of lock_page_cgroup can be interrupted
@@ -364,38 +338,16 @@ void mem_cgroup_move_lists(struct page *page, bool active)
         * safely get to page_cgroup without it, so just try_lock it:
         * mem_cgroup_isolate_pages allows for page left on wrong list.
         */
-       if (!try_lock_page_cgroup(page))
+       pc = lookup_page_cgroup(page);
+       if (!trylock_page_cgroup(pc))
                return;
-
-       /*
-        * Now page_cgroup is stable, but we cannot acquire mz->lru_lock
-        * while holding it, because mem_cgroup_force_empty_list does the
-        * reverse.  Get a hold on the mem_cgroup before unlocking, so that
-        * the zoneinfo remains stable, then take mz->lru_lock; then check
-        * that page still points to pc and pc (even if freed and reassigned
-        * to that same page meanwhile) still points to the same mem_cgroup.
-        * Then we know mz still points to the right spinlock, so it's safe
-        * to move_lists (page->page_cgroup might be reset while we do so, but
-        * that doesn't matter: pc->page is stable till we drop mz->lru_lock).
-        * We're being a little naughty not to try_lock_page_cgroup again
-        * inside there, but we are safe, aren't we?  Aren't we?  Whistle...
-        */
-       pc = page_get_page_cgroup(page);
-       if (pc) {
-               mem = pc->mem_cgroup;
+       if (pc && PageCgroupUsed(pc)) {
                mz = page_cgroup_zoneinfo(pc);
-               css_get(&mem->css);
-
-               unlock_page_cgroup(page);
-
                spin_lock_irqsave(&mz->lru_lock, flags);
-               if (page_get_page_cgroup(page) == pc && pc->mem_cgroup == mem)
-                       __mem_cgroup_move_lists(pc, active);
+               __mem_cgroup_move_lists(pc, lru);
                spin_unlock_irqrestore(&mz->lru_lock, flags);
-
-               css_put(&mem->css);
-       } else
-               unlock_page_cgroup(page);
+       }
+       unlock_page_cgroup(pc);
 }
 
 /*
@@ -416,21 +368,6 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
 }
 
 /*
- * This function is called from vmscan.c. In page reclaiming loop. balance
- * between active and inactive list is calculated. For memory controller
- * page reclaiming, we should use using mem_cgroup's imbalance rather than
- * zone's global lru imbalance.
- */
-long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
-{
-       unsigned long active, inactive;
-       /* active and inactive are the number of pages. 'long' is ok.*/
-       active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
-       inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
-       return (long) (active / (inactive + 1));
-}
-
-/*
  * prev_priority control...this will be used in memory reclaim path.
  */
 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
@@ -457,28 +394,17 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
  * (see include/linux/mmzone.h)
  */
 
-long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
-                                  struct zone *zone, int priority)
+long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
+                                       int priority, enum lru_list lru)
 {
-       long nr_active;
+       long nr_pages;
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
 
-       nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
-       return (nr_active >> priority);
-}
-
-long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
-                                       struct zone *zone, int priority)
-{
-       long nr_inactive;
-       int nid = zone->zone_pgdat->node_id;
-       int zid = zone_idx(zone);
-       struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+       nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
 
-       nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
-       return (nr_inactive >> priority);
+       return (nr_pages >> priority);
 }
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -486,7 +412,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        unsigned long *scanned, int order,
                                        int mode, struct zone *z,
                                        struct mem_cgroup *mem_cont,
-                                       int active)
+                                       int active, int file)
 {
        unsigned long nr_taken = 0;
        struct page *page;
@@ -497,37 +423,38 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
        int nid = z->zone_pgdat->node_id;
        int zid = zone_idx(z);
        struct mem_cgroup_per_zone *mz;
+       int lru = LRU_FILE * !!file + !!active;
 
+       BUG_ON(!mem_cont);
        mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-       if (active)
-               src = &mz->active_list;
-       else
-               src = &mz->inactive_list;
-
+       src = &mz->lists[lru];
 
        spin_lock(&mz->lru_lock);
        scan = 0;
        list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
                if (scan >= nr_to_scan)
                        break;
+               if (unlikely(!PageCgroupUsed(pc)))
+                       continue;
                page = pc->page;
 
                if (unlikely(!PageLRU(page)))
                        continue;
 
-               if (PageActive(page) && !active) {
-                       __mem_cgroup_move_lists(pc, true);
-                       continue;
-               }
-               if (!PageActive(page) && active) {
-                       __mem_cgroup_move_lists(pc, false);
+               /*
+                * TODO: play better with lumpy reclaim, grabbing anything.
+                */
+               if (PageUnevictable(page) ||
+                   (PageActive(page) && !active) ||
+                   (!PageActive(page) && active)) {
+                       __mem_cgroup_move_lists(pc, page_lru(page));
                        continue;
                }
 
                scan++;
                list_move(&pc->lru, &pc_list);
 
-               if (__isolate_lru_page(page, mode) == 0) {
+               if (__isolate_lru_page(page, mode, file) == 0) {
                        list_move(&page->lru, dst);
                        nr_taken++;
                }
@@ -547,60 +474,45 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
  * < 0 if the cgroup is over its limit
  */
 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
-                               gfp_t gfp_mask, enum charge_type ctype)
+                               gfp_t gfp_mask, enum charge_type ctype,
+                               struct mem_cgroup *memcg)
 {
        struct mem_cgroup *mem;
        struct page_cgroup *pc;
-       unsigned long flags;
        unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct mem_cgroup_per_zone *mz;
+       unsigned long flags;
 
-       /*
-        * Should page_cgroup's go to their own slab?
-        * One could optimize the performance of the charging routine
-        * by saving a bit in the page_flags and using it as a lock
-        * to see if the cgroup page already has a page_cgroup associated
-        * with it
-        */
-retry:
-       lock_page_cgroup(page);
-       pc = page_get_page_cgroup(page);
-       /*
-        * The page_cgroup exists and
-        * the page has already been accounted.
-        */
-       if (pc) {
-               VM_BUG_ON(pc->page != page);
-               VM_BUG_ON(pc->ref_cnt <= 0);
-
-               pc->ref_cnt++;
-               unlock_page_cgroup(page);
-               goto done;
-       }
-       unlock_page_cgroup(page);
-
-       pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
-       if (pc == NULL)
-               goto err;
-
+       pc = lookup_page_cgroup(page);
+       /* can happen at boot */
+       if (unlikely(!pc))
+               return 0;
+       prefetchw(pc);
        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
         * thread group leader migrates. It's possible that mm is not
         * set, if so charge the init_mm (happens for pagecache usage).
         */
-       if (!mm)
-               mm = &init_mm;
 
-       rcu_read_lock();
-       mem = rcu_dereference(mm->mem_cgroup);
-       /*
-        * For every charge from the cgroup, increment reference count
-        */
-       css_get(&mem->css);
-       rcu_read_unlock();
+       if (likely(!memcg)) {
+               rcu_read_lock();
+               mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+               if (unlikely(!mem)) {
+                       rcu_read_unlock();
+                       return 0;
+               }
+               /*
+                * For every charge from the cgroup, increment reference count
+                */
+               css_get(&mem->css);
+               rcu_read_unlock();
+       } else {
+               mem = memcg;
+               css_get(&memcg->css);
+       }
 
-       while (res_counter_charge(&mem->res, PAGE_SIZE)) {
+       while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
                if (!(gfp_mask & __GFP_WAIT))
                        goto out;
 
@@ -621,212 +533,320 @@ retry:
                        mem_cgroup_out_of_memory(mem, gfp_mask);
                        goto out;
                }
-               congestion_wait(WRITE, HZ/10);
        }
 
-       pc->ref_cnt = 1;
-       pc->mem_cgroup = mem;
-       pc->page = page;
-       pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
-       if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
-               pc->flags |= PAGE_CGROUP_FLAG_CACHE;
-
-       lock_page_cgroup(page);
-       if (page_get_page_cgroup(page)) {
-               unlock_page_cgroup(page);
-               /*
-                * Another charge has been added to this page already.
-                * We take lock_page_cgroup(page) again and read
-                * page->cgroup, increment refcnt.... just retry is OK.
-                */
+
+       lock_page_cgroup(pc);
+       if (unlikely(PageCgroupUsed(pc))) {
+               unlock_page_cgroup(pc);
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
-               kfree(pc);
-               goto retry;
+
+               goto done;
        }
-       page_assign_page_cgroup(page, pc);
-       unlock_page_cgroup(page);
+       pc->mem_cgroup = mem;
+       /*
+        * If a page is accounted as page cache, insert it into the inactive
+        * list.  If anon, insert it into the active list.
+        */
+       pc->flags = pcg_default_flags[ctype];
 
        mz = page_cgroup_zoneinfo(pc);
+
        spin_lock_irqsave(&mz->lru_lock, flags);
-       __mem_cgroup_add_list(pc);
+       __mem_cgroup_add_list(mz, pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
+       unlock_page_cgroup(pc);
 
 done:
        return 0;
 out:
        css_put(&mem->css);
-       kfree(pc);
-err:
        return -ENOMEM;
 }
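
mem_cgroup_charge_common() above now locates the pre-allocated page_cgroup with lookup_page_cgroup(), resolves the cgroup through mm->owner, and retries a failed res_counter charge a bounded number of times, reclaiming in between and falling back to the OOM killer before returning -ENOMEM. Stripped of the kernel specifics, the retry loop has roughly the shape sketched below; counter_charge() and try_reclaim() are invented stand-ins for res_counter_charge() and try_to_free_mem_cgroup_pages(), not kernel APIs:

/* Userspace sketch of the bounded charge-retry loop; helpers are invented. */
#include <stdbool.h>
#include <stdio.h>

#define RECLAIM_RETRIES 5
#define PAGE_SZ 4096UL

struct counter { unsigned long usage, limit; };

static bool counter_charge(struct counter *c, unsigned long n)
{
	if (c->usage + n > c->limit)
		return false;		/* over limit: caller reclaims and retries */
	c->usage += n;
	return true;
}

static bool try_reclaim(struct counter *c)	/* pretend reclaim frees one page */
{
	if (c->usage < PAGE_SZ)
		return false;
	c->usage -= PAGE_SZ;
	return true;
}

static int charge_one_page(struct counter *c, bool may_block)
{
	int retries = RECLAIM_RETRIES;

	while (!counter_charge(c, PAGE_SZ)) {
		if (!may_block)
			return -1;	/* like !(gfp_mask & __GFP_WAIT) */
		if (try_reclaim(c))
			continue;	/* made progress, retry the charge */
		if (--retries == 0)
			return -1;	/* like the OOM path returning -ENOMEM */
	}
	return 0;
}

int main(void)
{
	struct counter c = { .usage = 8 * PAGE_SZ, .limit = 8 * PAGE_SZ };

	printf("charge: %d, usage: %lu\n", charge_one_page(&c, true), c.usage);
	return 0;
}
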
 
 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+       if (PageCompound(page))
+               return 0;
+       /*
+        * If the page is already mapped, we don't have to account it again.
+        * For page cache, page->mapping points to an address_space; but
+        * page->mapping may also hold a stale anon_vma pointer, which we
+        * detect with the PageAnon() check (a newly mapped anonymous page
+        * has page->mapping == NULL).
+        */
+       if (page_mapped(page) || (page->mapping && !PageAnon(page)))
+               return 0;
+       if (unlikely(!mm))
+               mm = &init_mm;
        return mem_cgroup_charge_common(page, mm, gfp_mask,
-                               MEM_CGROUP_CHARGE_TYPE_MAPPED);
+                               MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
-       if (!mm)
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+       if (PageCompound(page))
+               return 0;
+       /*
+        * Corner case handling. This is usually called from
+        * add_to_page_cache(), but some filesystems (shmem) precharge the
+        * page before calling add_to_page_cache() with GFP_NOWAIT.
+        *
+        * So in the GFP_NOWAIT case the page may already be charged; check
+        * for that here and avoid charging it twice. (This works, but at a
+        * slightly higher cost.)
+        */
+       if (!(gfp_mask & __GFP_WAIT)) {
+               struct page_cgroup *pc;
+
+               pc = lookup_page_cgroup(page);
+               if (!pc)
+                       return 0;
+               lock_page_cgroup(pc);
+               if (PageCgroupUsed(pc)) {
+                       unlock_page_cgroup(pc);
+                       return 0;
+               }
+               unlock_page_cgroup(pc);
+       }
+
+       if (unlikely(!mm))
                mm = &init_mm;
-       return mem_cgroup_charge_common(page, mm, gfp_mask,
-                               MEM_CGROUP_CHARGE_TYPE_CACHE);
+
+       if (page_is_file_cache(page))
+               return mem_cgroup_charge_common(page, mm, gfp_mask,
+                               MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
+       else
+               return mem_cgroup_charge_common(page, mm, gfp_mask,
+                               MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
 }
 
 /*
- * Uncharging is always a welcome operation, we never complain, simply
- * uncharge.
+ * uncharge if !page_mapped(page)
  */
-void mem_cgroup_uncharge_page(struct page *page)
+static void
+__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 {
        struct page_cgroup *pc;
        struct mem_cgroup *mem;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;
 
+       if (mem_cgroup_subsys.disabled)
+               return;
+
        /*
         * Check if our page_cgroup is valid
         */
-       lock_page_cgroup(page);
-       pc = page_get_page_cgroup(page);
-       if (!pc)
-               goto unlock;
+       pc = lookup_page_cgroup(page);
+       if (unlikely(!pc || !PageCgroupUsed(pc)))
+               return;
 
-       VM_BUG_ON(pc->page != page);
-       VM_BUG_ON(pc->ref_cnt <= 0);
+       lock_page_cgroup(pc);
+       if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
+            || !PageCgroupUsed(pc)) {
+               /* This happens on a race with zap_pte_range() and do_swap_page() */
+               unlock_page_cgroup(pc);
+               return;
+       }
+       ClearPageCgroupUsed(pc);
+       mem = pc->mem_cgroup;
 
-       if (--(pc->ref_cnt) == 0) {
-               page_assign_page_cgroup(page, NULL);
-               unlock_page_cgroup(page);
+       mz = page_cgroup_zoneinfo(pc);
+       spin_lock_irqsave(&mz->lru_lock, flags);
+       __mem_cgroup_remove_list(mz, pc);
+       spin_unlock_irqrestore(&mz->lru_lock, flags);
+       unlock_page_cgroup(pc);
 
-               mz = page_cgroup_zoneinfo(pc);
-               spin_lock_irqsave(&mz->lru_lock, flags);
-               __mem_cgroup_remove_list(pc);
-               spin_unlock_irqrestore(&mz->lru_lock, flags);
+       res_counter_uncharge(&mem->res, PAGE_SIZE);
+       css_put(&mem->css);
 
-               mem = pc->mem_cgroup;
-               res_counter_uncharge(&mem->res, PAGE_SIZE);
-               css_put(&mem->css);
+       return;
+}
 
-               kfree(pc);
+void mem_cgroup_uncharge_page(struct page *page)
+{
+       /* early check. */
+       if (page_mapped(page))
                return;
-       }
+       if (page->mapping && !PageAnon(page))
+               return;
+       __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
 
-unlock:
-       unlock_page_cgroup(page);
+void mem_cgroup_uncharge_cache_page(struct page *page)
+{
+       VM_BUG_ON(page_mapped(page));
+       VM_BUG_ON(page->mapping);
+       __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
 /*
- * Returns non-zero if a page (under migration) has valid page_cgroup member.
- * Refcnt of page_cgroup is incremented.
+ * Before starting migration, account against new page.
  */
-int mem_cgroup_prepare_migration(struct page *page)
+int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
 {
        struct page_cgroup *pc;
+       struct mem_cgroup *mem = NULL;
+       enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
+       int ret = 0;
 
-       lock_page_cgroup(page);
-       pc = page_get_page_cgroup(page);
-       if (pc)
-               pc->ref_cnt++;
-       unlock_page_cgroup(page);
-       return pc != NULL;
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+
+       pc = lookup_page_cgroup(page);
+       lock_page_cgroup(pc);
+       if (PageCgroupUsed(pc)) {
+               mem = pc->mem_cgroup;
+               css_get(&mem->css);
+               if (PageCgroupCache(pc)) {
+                       if (page_is_file_cache(page))
+                               ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+                       else
+                               ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+               }
+       }
+       unlock_page_cgroup(pc);
+       if (mem) {
+               ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
+                       ctype, mem);
+               css_put(&mem->css);
+       }
+       return ret;
 }
 
-void mem_cgroup_end_migration(struct page *page)
+/* remove the redundant charge if migration failed */
+void mem_cgroup_end_migration(struct page *newpage)
 {
-       mem_cgroup_uncharge_page(page);
+       /*
+        * On success, page->mapping is not NULL.
+        * Special rollback care is necessary when
+        * 1. migration fails (newpage->mapping is cleared in this case), or
+        * 2. the newpage was moved but never remapped again because the task
+        *    exited and the newpage is obsolete. In this case the new page
+        *    may be swapcache, so we just always call
+        *    mem_cgroup_uncharge_page() to avoid a mess; the page_cgroup
+        *    will be removed if it is unnecessary. File cache pages are
+        *    still on the radix-tree, so don't touch them.
+        */
+       if (!newpage->mapping)
+               __mem_cgroup_uncharge_common(newpage,
+                               MEM_CGROUP_CHARGE_TYPE_FORCE);
+       else if (PageAnon(newpage))
+               mem_cgroup_uncharge_page(newpage);
 }
 
 /*
- * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
- * And no race with uncharge() routines because page_cgroup for *page*
- * has extra one reference by mem_cgroup_prepare_migration.
+ * A call to try to shrink memory usage under the specified resource
+ * controller. This is typically used to reclaim shmem pages, reducing the
+ * side effects of page allocation from shmem, which is used by some
+ * mem_cgroup.
  */
-void mem_cgroup_page_migration(struct page *page, struct page *newpage)
+int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 {
-       struct page_cgroup *pc;
-       struct mem_cgroup_per_zone *mz;
-       unsigned long flags;
+       struct mem_cgroup *mem;
+       int progress = 0;
+       int retry = MEM_CGROUP_RECLAIM_RETRIES;
 
-       lock_page_cgroup(page);
-       pc = page_get_page_cgroup(page);
-       if (!pc) {
-               unlock_page_cgroup(page);
-               return;
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+       if (!mm)
+               return 0;
+
+       rcu_read_lock();
+       mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+       if (unlikely(!mem)) {
+               rcu_read_unlock();
+               return 0;
        }
+       css_get(&mem->css);
+       rcu_read_unlock();
 
-       page_assign_page_cgroup(page, NULL);
-       unlock_page_cgroup(page);
+       do {
+               progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
+               progress += res_counter_check_under_limit(&mem->res);
+       } while (!progress && --retry);
 
-       mz = page_cgroup_zoneinfo(pc);
-       spin_lock_irqsave(&mz->lru_lock, flags);
-       __mem_cgroup_remove_list(pc);
-       spin_unlock_irqrestore(&mz->lru_lock, flags);
+       css_put(&mem->css);
+       if (!retry)
+               return -ENOMEM;
+       return 0;
+}
 
-       pc->page = newpage;
-       lock_page_cgroup(newpage);
-       page_assign_page_cgroup(newpage, pc);
-       unlock_page_cgroup(newpage);
+int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
+{
 
-       mz = page_cgroup_zoneinfo(pc);
-       spin_lock_irqsave(&mz->lru_lock, flags);
-       __mem_cgroup_add_list(pc);
-       spin_unlock_irqrestore(&mz->lru_lock, flags);
+       int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+       int progress;
+       int ret = 0;
+
+       while (res_counter_set_limit(&memcg->res, val)) {
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+               if (!retry_count) {
+                       ret = -EBUSY;
+                       break;
+               }
+               progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
+               if (!progress)
+                       retry_count--;
+       }
+       return ret;
 }
 
+
 /*
  * This routine traverse page_cgroup in given list and drop them all.
- * This routine ignores page_cgroup->ref_cnt.
  * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
  */
 #define FORCE_UNCHARGE_BATCH   (128)
 static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
                            struct mem_cgroup_per_zone *mz,
-                           int active)
+                           enum lru_list lru)
 {
        struct page_cgroup *pc;
        struct page *page;
-       int count;
+       int count = FORCE_UNCHARGE_BATCH;
        unsigned long flags;
        struct list_head *list;
 
-       if (active)
-               list = &mz->active_list;
-       else
-               list = &mz->inactive_list;
+       list = &mz->lists[lru];
 
-       if (list_empty(list))
-               return;
-retry:
-       count = FORCE_UNCHARGE_BATCH;
        spin_lock_irqsave(&mz->lru_lock, flags);
-
-       while (--count && !list_empty(list)) {
+       while (!list_empty(list)) {
                pc = list_entry(list->prev, struct page_cgroup, lru);
                page = pc->page;
-               lock_page_cgroup(page);
-               if (page_get_page_cgroup(page) == pc) {
-                       page_assign_page_cgroup(page, NULL);
-                       unlock_page_cgroup(page);
-                       __mem_cgroup_remove_list(pc);
-                       res_counter_uncharge(&mem->res, PAGE_SIZE);
-                       css_put(&mem->css);
-                       kfree(pc);
+               if (!PageCgroupUsed(pc))
+                       break;
+               get_page(page);
+               spin_unlock_irqrestore(&mz->lru_lock, flags);
+               /*
+                * Check if this page is on the LRU. A page that is not on
+                * the LRU can be found here if it is under page migration.
+                */
+               if (PageLRU(page)) {
+                       __mem_cgroup_uncharge_common(page,
+                                       MEM_CGROUP_CHARGE_TYPE_FORCE);
+                       put_page(page);
+                       if (--count <= 0) {
+                               count = FORCE_UNCHARGE_BATCH;
+                               cond_resched();
+                       }
                } else {
-                       /* racing uncharge: let page go then retry */
-                       unlock_page_cgroup(page);
+                       spin_lock_irqsave(&mz->lru_lock, flags);
                        break;
                }
+               spin_lock_irqsave(&mz->lru_lock, flags);
        }
-
        spin_unlock_irqrestore(&mz->lru_lock, flags);
-       if (!list_empty(list)) {
-               cond_resched();
-               goto retry;
-       }
 }
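
mem_cgroup_force_empty_list() above pins each page with get_page(), drops mz->lru_lock around the uncharge, and calls cond_resched() every FORCE_UNCHARGE_BATCH pages, so draining a huge LRU neither holds the spinlock across the expensive work nor hogs the CPU. The same "drain a list in batches, dropping the lock around the per-item work" shape, reduced to a userspace sketch in which lock()/unlock()/resched() are illustrative stand-ins for the spinlock and cond_resched():

/* Userspace sketch of batched list draining with the lock dropped around
 * the per-item work; all helpers here are illustrative stand-ins. */
#include <stdio.h>

#define BATCH 128

struct item { struct item *next; };

static struct item *list_head;

static void lock(void)    { /* stands in for spin_lock_irqsave(&mz->lru_lock) */ }
static void unlock(void)  { /* stands in for spin_unlock_irqrestore() */ }
static void resched(void) { /* stands in for cond_resched() */ }
static void expensive_uncharge(struct item *it) { (void)it; }

static void drain_list(void)
{
	int count = BATCH;

	lock();
	while (list_head) {
		struct item *it = list_head;

		list_head = it->next;	/* take one item under the lock */
		unlock();		/* drop the lock for the real work */

		expensive_uncharge(it);
		if (--count <= 0) {	/* yield the CPU every BATCH items */
			count = BATCH;
			resched();
		}
		lock();			/* retake before touching the list again */
	}
	unlock();
}

int main(void)
{
	static struct item items[1000];
	int i;

	for (i = 0; i < 999; i++)
		items[i].next = &items[i + 1];
	list_head = items;
	drain_list();
	printf("drained\n");
	return 0;
}
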
 
 /*
@@ -847,15 +867,17 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
        while (mem->res.usage > 0) {
                if (atomic_read(&mem->css.cgroup->count) > 0)
                        goto out;
+               /* This is to make sure all *used* pages are on the LRU. */
+               lru_add_drain_all();
                for_each_node_state(node, N_POSSIBLE)
                        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                                struct mem_cgroup_per_zone *mz;
+                               enum lru_list l;
                                mz = mem_cgroup_zoneinfo(mem, node, zid);
-                               /* drop all page_cgroup in active_list */
-                               mem_cgroup_force_empty_list(mem, mz, 1);
-                               /* drop all page_cgroup in inactive_list */
-                               mem_cgroup_force_empty_list(mem, mz, 0);
+                               for_each_lru(l)
+                                       mem_cgroup_force_empty_list(mem, mz, l);
                        }
+               cond_resched();
        }
        ret = 0;
 out:
@@ -863,58 +885,55 @@ out:
        return ret;
 }
 
-static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
+static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 {
-       *tmp = memparse(buf, &buf);
-       if (*buf != '\0')
-               return -EINVAL;
-
-       /*
-        * Round up the value to the closest page size
-        */
-       *tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
-       return 0;
+       return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
+                                   cft->private);
 }
-
-static ssize_t mem_cgroup_read(struct cgroup *cont,
-                       struct cftype *cft, struct file *file,
-                       char __user *userbuf, size_t nbytes, loff_t *ppos)
+/*
+ * The user of this function is...
+ * RES_LIMIT.
+ */
+static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
+                           const char *buffer)
 {
-       return res_counter_read(&mem_cgroup_from_cont(cont)->res,
-                               cft->private, userbuf, nbytes, ppos,
-                               NULL);
-}
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       unsigned long long val;
+       int ret;
 
-static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
-                               struct file *file, const char __user *userbuf,
-                               size_t nbytes, loff_t *ppos)
-{
-       return res_counter_write(&mem_cgroup_from_cont(cont)->res,
-                               cft->private, userbuf, nbytes, ppos,
-                               mem_cgroup_write_strategy);
+       switch (cft->private) {
+       case RES_LIMIT:
+               /* This function does all the necessary parsing... reuse it */
+               ret = res_counter_memparse_write_strategy(buffer, &val);
+               if (!ret)
+                       ret = mem_cgroup_resize_limit(memcg, val);
+               break;
+       default:
+               ret = -EINVAL; /* should be BUG() ? */
+               break;
+       }
+       return ret;
 }
 
-static ssize_t mem_force_empty_write(struct cgroup *cont,
-                               struct cftype *cft, struct file *file,
-                               const char __user *userbuf,
-                               size_t nbytes, loff_t *ppos)
+static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 {
-       struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-       int ret = mem_cgroup_force_empty(mem);
-       if (!ret)
-               ret = nbytes;
-       return ret;
+       struct mem_cgroup *mem;
+
+       mem = mem_cgroup_from_cont(cont);
+       switch (event) {
+       case RES_MAX_USAGE:
+               res_counter_reset_max(&mem->res);
+               break;
+       case RES_FAILCNT:
+               res_counter_reset_failcnt(&mem->res);
+               break;
+       }
+       return 0;
 }
 
-/*
- * Note: This should be removed if cgroup supports write-only file.
- */
-static ssize_t mem_force_empty_read(struct cgroup *cont,
-                               struct cftype *cft,
-                               struct file *file, char __user *userbuf,
-                               size_t nbytes, loff_t *ppos)
+static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
 {
-       return -EINVAL;
+       return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
 }
 
 static const struct mem_cgroup_stat_desc {
@@ -923,11 +942,13 @@ static const struct mem_cgroup_stat_desc {
 } mem_cgroup_stat_desc[] = {
        [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
        [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
+       [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
+       [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
 };
 
-static int mem_control_stat_show(struct seq_file *m, void *arg)
+static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
+                                struct cgroup_map_cb *cb)
 {
-       struct cgroup *cont = m->private;
        struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
        struct mem_cgroup_stat *stat = &mem_cont->stat;
        int i;
@@ -937,63 +958,66 @@ static int mem_control_stat_show(struct seq_file *m, void *arg)
 
                val = mem_cgroup_read_stat(stat, i);
                val *= mem_cgroup_stat_desc[i].unit;
-               seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
-                               (long long)val);
+               cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
        }
        /* showing # of active pages */
        {
-               unsigned long active, inactive;
-
-               inactive = mem_cgroup_get_all_zonestat(mem_cont,
-                                               MEM_CGROUP_ZSTAT_INACTIVE);
-               active = mem_cgroup_get_all_zonestat(mem_cont,
-                                               MEM_CGROUP_ZSTAT_ACTIVE);
-               seq_printf(m, "active %ld\n", (active) * PAGE_SIZE);
-               seq_printf(m, "inactive %ld\n", (inactive) * PAGE_SIZE);
+               unsigned long active_anon, inactive_anon;
+               unsigned long active_file, inactive_file;
+               unsigned long unevictable;
+
+               inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
+                                               LRU_INACTIVE_ANON);
+               active_anon = mem_cgroup_get_all_zonestat(mem_cont,
+                                               LRU_ACTIVE_ANON);
+               inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
+                                               LRU_INACTIVE_FILE);
+               active_file = mem_cgroup_get_all_zonestat(mem_cont,
+                                               LRU_ACTIVE_FILE);
+               unevictable = mem_cgroup_get_all_zonestat(mem_cont,
+                                                       LRU_UNEVICTABLE);
+
+               cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
+               cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
+               cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
+               cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
+               cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
+
        }
        return 0;
 }
 
-static const struct file_operations mem_control_stat_file_operations = {
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int mem_control_stat_open(struct inode *unused, struct file *file)
-{
-       /* XXX __d_cont */
-       struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
-
-       file->f_op = &mem_control_stat_file_operations;
-       return single_open(file, mem_control_stat_show, cont);
-}
-
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
                .private = RES_USAGE,
-               .read = mem_cgroup_read,
+               .read_u64 = mem_cgroup_read,
+       },
+       {
+               .name = "max_usage_in_bytes",
+               .private = RES_MAX_USAGE,
+               .trigger = mem_cgroup_reset,
+               .read_u64 = mem_cgroup_read,
        },
        {
                .name = "limit_in_bytes",
                .private = RES_LIMIT,
-               .write = mem_cgroup_write,
-               .read = mem_cgroup_read,
+               .write_string = mem_cgroup_write,
+               .read_u64 = mem_cgroup_read,
        },
        {
                .name = "failcnt",
                .private = RES_FAILCNT,
-               .read = mem_cgroup_read,
+               .trigger = mem_cgroup_reset,
+               .read_u64 = mem_cgroup_read,
        },
        {
                .name = "force_empty",
-               .write = mem_force_empty_write,
-               .read = mem_force_empty_read,
+               .trigger = mem_force_empty_write,
        },
        {
                .name = "stat",
-               .open = mem_control_stat_open,
+               .read_map = mem_control_stat_show,
        },
 };
 
@@ -1001,7 +1025,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup_per_zone *mz;
-       int zone;
+       enum lru_list l;
+       int zone, tmp = node;
        /*
         * This routine is called against possible nodes.
         * But it's BUG to call kmalloc() against offline node.
@@ -1010,10 +1035,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
         *       never be onlined. It's better to use memory hotplug callback
         *       function.
         */
-       if (node_state(node, N_HIGH_MEMORY))
-               pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
-       else
-               pn = kmalloc(sizeof(*pn), GFP_KERNEL);
+       if (!node_state(node, N_NORMAL_MEMORY))
+               tmp = -1;
+       pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
        if (!pn)
                return 1;
 
@@ -1022,9 +1046,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 
        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
-               INIT_LIST_HEAD(&mz->active_list);
-               INIT_LIST_HEAD(&mz->inactive_list);
                spin_lock_init(&mz->lru_lock);
+               for_each_lru(l)
+                       INIT_LIST_HEAD(&mz->lists[l]);
        }
        return 0;
 }
@@ -1034,6 +1058,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
        kfree(mem->info.nodeinfo[node]);
 }
 
+static struct mem_cgroup *mem_cgroup_alloc(void)
+{
+       struct mem_cgroup *mem;
+
+       if (sizeof(*mem) < PAGE_SIZE)
+               mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+       else
+               mem = vmalloc(sizeof(*mem));
+
+       if (mem)
+               memset(mem, 0, sizeof(*mem));
+       return mem;
+}
+
+static void mem_cgroup_free(struct mem_cgroup *mem)
+{
+       if (sizeof(*mem) < PAGE_SIZE)
+               kfree(mem);
+       else
+               vfree(mem);
+}
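
mem_cgroup_alloc()/mem_cgroup_free() above switch between kmalloc() and vmalloc() based on sizeof(*mem): once the per-cpu statistics make struct mem_cgroup larger than a page, a physically contiguous allocation is no longer worth demanding. Because both sides test the same sizeof() expression, the allocation and free paths can never pick different allocators. A compressed userspace sketch of that pattern (small_alloc()/big_alloc() are invented stand-ins for kmalloc()/vmalloc()):

/* Userspace sketch of "pick the allocator from sizeof()"; the two allocators
 * are invented stand-ins for kmalloc()/vmalloc(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096UL

struct big_stats { long percpu[64][8]; };	/* illustrative: >= one page */

static void *small_alloc(size_t n) { printf("kmalloc-style\n"); return malloc(n); }
static void *big_alloc(size_t n)   { printf("vmalloc-style\n"); return malloc(n); }

static struct big_stats *stats_alloc(void)
{
	struct big_stats *p;

	if (sizeof(*p) < PAGE_SZ)		/* same test as the free side */
		p = small_alloc(sizeof(*p));
	else
		p = big_alloc(sizeof(*p));
	if (p)
		memset(p, 0, sizeof(*p));
	return p;
}

static void stats_free(struct big_stats *p)
{
	if (sizeof(*p) < PAGE_SZ)
		free(p);			/* kfree() in the kernel */
	else
		free(p);			/* vfree() in the kernel */
}

int main(void)
{
	struct big_stats *p = stats_alloc();

	stats_free(p);
	return 0;
}
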
+
+
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -1042,17 +1089,14 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 
        if (unlikely((cont->parent) == NULL)) {
                mem = &init_mem_cgroup;
-               init_mm.mem_cgroup = mem;
-       } else
-               mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
-
-       if (mem == NULL)
-               return ERR_PTR(-ENOMEM);
+       } else {
+               mem = mem_cgroup_alloc();
+               if (!mem)
+                       return ERR_PTR(-ENOMEM);
+       }
 
        res_counter_init(&mem->res);
 
-       memset(&mem->info, 0, sizeof(mem->info));
-
        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;
@@ -1062,7 +1106,7 @@ free_out:
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
        if (cont->parent != NULL)
-               kfree(mem);
+               mem_cgroup_free(mem);
        return ERR_PTR(-ENOMEM);
 }
 
@@ -1082,7 +1126,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
 
-       kfree(mem_cgroup_from_cont(cont));
+       mem_cgroup_free(mem_cgroup_from_cont(cont));
 }
 
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
@@ -1107,20 +1151,13 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
        mem = mem_cgroup_from_cont(cont);
        old_mem = mem_cgroup_from_cont(old_cont);
 
-       if (mem == old_mem)
-               goto out;
-
        /*
         * Only thread group leaders are allowed to migrate, the mm_struct is
         * in effect owned by the leader
         */
-       if (p->tgid != p->pid)
+       if (!thread_group_leader(p))
                goto out;
 
-       css_get(&mem->css);
-       rcu_assign_pointer(mm->mem_cgroup, mem);
-       css_put(&old_mem->css);
-
 out:
        mmput(mm);
 }