memcg: fix swap accounting
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 55dea59..a83e039 100644
@@ -45,7 +45,7 @@ struct cgroup_subsys mem_cgroup_subsys __read_mostly;
 #define MEM_CGROUP_RECLAIM_RETRIES     5
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
-/* Turned on only when memory cgroup is enabled && really_do_swap_account = 0 */
+/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
 int do_swap_account __read_mostly;
 static int really_do_swap_account __initdata = 1; /* for remembering boot option */
 #else
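For reference, really_do_swap_account only latches the boot option; the effective do_swap_account flag is derived from it once during init. A minimal sketch of that wiring, assuming the "noswapaccount" boot parameter and init helper as found in kernels of this era (not part of this patch):

/* Sketch: how the boot option feeds do_swap_account. */
static void __init enable_swap_cgroup(void)
{
        /* account swap only if memcg is enabled and the user did not opt out */
        if (!mem_cgroup_disabled() && really_do_swap_account)
                do_swap_account = 1;
}

static int __init disable_swap_account(char *s)
{
        really_do_swap_account = 0;     /* "noswapaccount" on the command line */
        return 1;
}
__setup("noswapaccount", disable_swap_account);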
@@ -62,7 +62,8 @@ enum mem_cgroup_stat_index {
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
-       MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
+       MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
+       MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
 
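The new MEM_CGROUP_STAT_MAPPED_FILE slot is updated through the same lock-free per-CPU counters as its neighbours. A sketch of the helper used by the hunks below, with the structure layout assumed from kernels of this era:

/* Sketch: per-CPU stat bump; callers must keep the CPU pinned. */
struct mem_cgroup_stat_cpu {
        s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
                                enum mem_cgroup_stat_index idx, int val)
{
        stat->count[idx] += val;
}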
@@ -188,6 +189,7 @@ enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
        MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
        MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
+       MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
        NR_CHARGE_TYPE,
 };
 
@@ -314,14 +316,6 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
        return mem;
 }
 
-static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
-{
-       if (!mem)
-               return true;
-       return css_is_removed(&mem->css);
-}
-
-
 /*
  * Call callback function against all cgroup under hierarchy tree.
  */
@@ -578,6 +572,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
        return 0;
 }
 
+int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
+{
+       unsigned long active;
+       unsigned long inactive;
+
+       inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
+       active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
+
+       return (active > inactive);
+}
+
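mem_cgroup_inactive_file_is_low() gives vmscan a per-memcg answer to the same active/inactive file balance question it already asks globally. A sketch of the expected call site in mm/vmscan.c, reconstructed from kernels of this era (illustrative, not part of this hunk):

/* mm/vmscan.c (sketch): pick the global or memcg file-LRU balance check */
static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
{
        if (scanning_global_lru(sc))
                return inactive_file_is_low_global(zone);

        return mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
}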
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
                                       struct zone *zone,
                                       enum lru_list lru)
@@ -897,6 +902,44 @@ static void record_last_oom(struct mem_cgroup *mem)
        mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
 }
 
+/*
+ * Currently used to update mapped file statistics, but the routine can be
+ * generalized to update other statistics as well.
+ */
+void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+{
+       struct mem_cgroup *mem;
+       struct mem_cgroup_stat *stat;
+       struct mem_cgroup_stat_cpu *cpustat;
+       int cpu;
+       struct page_cgroup *pc;
+
+       if (!page_is_file_cache(page))
+               return;
+
+       pc = lookup_page_cgroup(page);
+       if (unlikely(!pc))
+               return;
+
+       lock_page_cgroup(pc);
+       mem = pc->mem_cgroup;
+       if (!mem)
+               goto done;
+
+       if (!PageCgroupUsed(pc))
+               goto done;
+
+       /*
+        * Preemption is already disabled; we don't need get_cpu().
+        */
+       cpu = smp_processor_id();
+       stat = &mem->stat;
+       cpustat = &stat->cpustat[cpu];
+
+       __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+done:
+       unlock_page_cgroup(pc);
+}
 
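mem_cgroup_update_mapped_file_stat() is driven from file-page rmap transitions, so MAPPED_FILE counts pages gaining or losing their first/last mapping. A sketch of the increment side in mm/rmap.c (the decrement in page_remove_rmap() is symmetric; reconstructed from kernels of this era):

/* mm/rmap.c (sketch): account a file page when it gains its first mapping */
void page_add_file_rmap(struct page *page)
{
        if (atomic_inc_and_test(&page->_mapcount)) {
                __inc_zone_page_state(page, NR_FILE_MAPPED);
                mem_cgroup_update_mapped_file_stat(page, 1);
        }
}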
 /*
  * Unlike exported interface, "oom" parameter is added. if oom==true,
@@ -932,7 +975,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
        if (unlikely(!mem))
                return 0;
 
-       VM_BUG_ON(mem_cgroup_is_obsolete(mem));
+       VM_BUG_ON(css_is_removed(&mem->css));
 
        while (1) {
                int ret;
@@ -1024,9 +1067,7 @@ static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
                return NULL;
 
        pc = lookup_page_cgroup(page);
-       /*
-        * Used bit of swapcache is solid under page lock.
-        */
+       lock_page_cgroup(pc);
        if (PageCgroupUsed(pc)) {
                mem = pc->mem_cgroup;
                if (mem && !css_tryget(&mem->css))
@@ -1040,6 +1081,7 @@ static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
                        mem = NULL;
                rcu_read_unlock();
        }
+       unlock_page_cgroup(pc);
        return mem;
 }
 
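With this change the page_cgroup lock covers both the PageCgroupUsed() test and the css_tryget(), instead of relying on the page lock alone. The whole function after the two hunks above, with the elided middle reconstructed from kernels of this era (a sketch, not authoritative):

static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
{
        struct mem_cgroup *mem = NULL;
        struct page_cgroup *pc;
        unsigned short id;
        swp_entry_t ent;

        if (!PageSwapCache(page))
                return NULL;

        pc = lookup_page_cgroup(page);
        lock_page_cgroup(pc);
        if (PageCgroupUsed(pc)) {
                mem = pc->mem_cgroup;
                if (mem && !css_tryget(&mem->css))
                        mem = NULL;
        } else {
                /* charge already went to swap; chase the swap_cgroup record */
                ent.val = page_private(page);
                id = lookup_swap_cgroup(ent);
                rcu_read_lock();
                mem = mem_cgroup_lookup(id);
                if (mem && !css_tryget(&mem->css))
                        mem = NULL;
                rcu_read_unlock();
        }
        unlock_page_cgroup(pc);
        return mem;
}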
@@ -1096,6 +1138,10 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
        struct mem_cgroup_per_zone *from_mz, *to_mz;
        int nid, zid;
        int ret = -EBUSY;
+       struct page *page;
+       int cpu;
+       struct mem_cgroup_stat *stat;
+       struct mem_cgroup_stat_cpu *cpustat;
 
        VM_BUG_ON(from == to);
        VM_BUG_ON(PageLRU(pc->page));
@@ -1116,6 +1162,23 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
 
        res_counter_uncharge(&from->res, PAGE_SIZE);
        mem_cgroup_charge_statistics(from, pc, false);
+
+       page = pc->page;
+       if (page_is_file_cache(page) && page_mapped(page)) {
+               cpu = smp_processor_id();
+               /* Update mapped_file data for mem_cgroup "from" */
+               stat = &from->stat;
+               cpustat = &stat->cpustat[cpu];
+               __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+                                               -1);
+
+               /* Update mapped_file data for mem_cgroup "to" */
+               stat = &to->stat;
+               cpustat = &stat->cpustat[cpu];
+               __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+                                               1);
+       }
+
        if (do_swap_account)
                res_counter_uncharge(&from->memsw, PAGE_SIZE);
        css_put(&from->css);
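Note that the -1/+1 pair above lands on whichever CPU performs the move, not necessarily the CPU that first accounted the page; that is fine because readers sum the counter over all CPUs. A sketch of the reader, reconstructed from this file as of this era:

/* Sketch: per-CPU deltas are only meaningful as a sum across CPUs */
static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
                                enum mem_cgroup_stat_index idx)
{
        int cpu;
        s64 ret = 0;

        for_each_possible_cpu(cpu)
                ret += stat->cpustat[cpu].count[idx];
        return ret;
}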
@@ -1238,6 +1301,10 @@ int mem_cgroup_newpage_charge(struct page *page,
                                MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+                                       enum charge_type ctype);
+
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
@@ -1274,16 +1341,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                unlock_page_cgroup(pc);
        }
 
-       if (do_swap_account && PageSwapCache(page)) {
-               mem = try_get_mem_cgroup_from_swapcache(page);
-               if (mem)
-                       mm = NULL;
-                 else
-                       mem = NULL;
-               /* SwapCache may be still linked to LRU now. */
-               mem_cgroup_lru_del_before_commit_swapcache(page);
-       }
-
        if (unlikely(!mm && !mem))
                mm = &init_mm;
 
@@ -1291,32 +1348,16 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
 
-       ret = mem_cgroup_charge_common(page, mm, gfp_mask,
-                               MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
-       if (mem)
-               css_put(&mem->css);
-       if (PageSwapCache(page))
-               mem_cgroup_lru_add_after_commit_swapcache(page);
+       /* shmem */
+       if (PageSwapCache(page)) {
+               ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+               if (!ret)
+                       __mem_cgroup_commit_charge_swapin(page, mem,
+                                       MEM_CGROUP_CHARGE_TYPE_SHMEM);
+       } else
+               ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+                                       MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
 
-       if (do_swap_account && !ret && PageSwapCache(page)) {
-               swp_entry_t ent = {.val = page_private(page)};
-               unsigned short id;
-               /* avoid double counting */
-               id = swap_cgroup_record(ent, 0);
-               rcu_read_lock();
-               mem = mem_cgroup_lookup(id);
-               if (mem) {
-                       /*
-                        * We did swap-in. Then, this entry is doubly counted
-                        * both in mem and memsw. We uncharge it, here.
-                        * Recorded ID can be obsolete. We avoid calling
-                        * css_tryget()
-                        */
-                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-                       mem_cgroup_put(mem);
-               }
-               rcu_read_unlock();
-       }
        return ret;
 }
 
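The big block removed above is not lost: the same swap-in double-count fixup now runs inside __mem_cgroup_commit_charge_swapin(), which the shmem path shares with the ordinary swap-in path. A sketch of that function's tail, assumed from the body elided between the next two hunks:

        /*
         * Tail of __mem_cgroup_commit_charge_swapin() (sketch): once the
         * page charge is committed, drop the memsw half of a doubly
         * counted swap-in.
         */
        if (do_swap_account && PageSwapCache(page)) {
                swp_entry_t ent = {.val = page_private(page)};
                unsigned short id;
                struct mem_cgroup *memcg;

                id = swap_cgroup_record(ent, 0);        /* clear the record */
                rcu_read_lock();
                memcg = mem_cgroup_lookup(id);
                if (memcg) {
                        /* uncharge the memsw taken at swapout time */
                        res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
                        mem_cgroup_put(memcg);
                }
                rcu_read_unlock();
        }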
@@ -1359,7 +1400,9 @@ charge_cur_mm:
        return __mem_cgroup_try_charge(mm, mask, ptr, true);
 }
 
-void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+                                       enum charge_type ctype)
 {
        struct page_cgroup *pc;
 
@@ -1369,7 +1412,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
                return;
        pc = lookup_page_cgroup(page);
        mem_cgroup_lru_del_before_commit_swapcache(page);
-       __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+       __mem_cgroup_commit_charge(ptr, pc, ctype);
        mem_cgroup_lru_add_after_commit_swapcache(page);
        /*
         * Now swap is on-memory. This means this page may be
@@ -1400,6 +1443,12 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 
 }
 
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+       __mem_cgroup_commit_charge_swapin(page, ptr,
+                                       MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 {
        if (mem_cgroup_disabled())
@@ -1445,6 +1494,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 
        switch (ctype) {
        case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+       case MEM_CGROUP_CHARGE_TYPE_DROP:
                if (page_mapped(page))
                        goto unlock_out;
                break;
@@ -1503,24 +1553,31 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
        __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
+#ifdef CONFIG_SWAP
 /*
- * called from __delete_from_swap_cache() and drop "page" account.
+ * called after __delete_from_swap_cache() and drops the "page" account.
  * memcg information is recorded to swap_cgroup of "ent"
  */
-void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
 {
        struct mem_cgroup *memcg;
+       int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
+
+       if (!swapout) /* this was a swap cache page, but the swap entry is unused */
+               ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
+
+       memcg = __mem_cgroup_uncharge_common(page, ctype);
 
-       memcg = __mem_cgroup_uncharge_common(page,
-                                       MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
        /* record memcg information */
-       if (do_swap_account && memcg) {
+       if (do_swap_account && swapout && memcg) {
                swap_cgroup_record(ent, css_id(&memcg->css));
                mem_cgroup_get(memcg);
        }
-       if (memcg)
+       if (swapout && memcg)
                css_put(&memcg->css);
 }
+#endif
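The swapout flag distinguishes the two ways a page leaves swap cache: vmscan's __remove_mapping() passes true because the page really goes to swap, while dropping an unused entry passes false so the page and memsw charges die together. A sketch of the second caller, reconstructed from mm/swap_state.c of this era (the exact swap-free helper varies by kernel vintage):

/* mm/swap_state.c (sketch): swap cache dropped, page never went to swap */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page_private(page);

        spin_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);

        /* swapout == false: uncharge fully, record nothing in swap_cgroup */
        mem_cgroup_uncharge_swapcache(page, entry, false);
        swap_free(entry);       /* swapcache_free() in some kernels */
        page_cache_release(page);
}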
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 /*
@@ -1632,37 +1689,28 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
 }
 
 /*
- * A call to try to shrink memory usage under specified resource controller.
- * This is typically used for page reclaiming for shmem for reducing side
- * effect of page allocation from shmem, which is used by some mem_cgroup.
+ * A call to try to shrink memory usage on charge failure at shmem's swapin.
+ * Calling hierarchical_reclaim is not enough because we should update
+ * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
+ * Moreover, considering hierarchy, we should reclaim from the mem_over_limit,
+ * not from the memcg which this page would be charged to.
+ * try_charge_swapin does all of this work properly.
  */
-int mem_cgroup_shrink_usage(struct page *page,
+int mem_cgroup_shmem_charge_fallback(struct page *page,
                            struct mm_struct *mm,
                            gfp_t gfp_mask)
 {
        struct mem_cgroup *mem = NULL;
-       int progress = 0;
-       int retry = MEM_CGROUP_RECLAIM_RETRIES;
+       int ret;
 
        if (mem_cgroup_disabled())
                return 0;
-       if (page)
-               mem = try_get_mem_cgroup_from_swapcache(page);
-       if (!mem && mm)
-               mem = try_get_mem_cgroup_from_mm(mm);
-       if (unlikely(!mem))
-               return 0;
 
-       do {
-               progress = mem_cgroup_hierarchical_reclaim(mem,
-                                       gfp_mask, true, false);
-               progress += mem_cgroup_check_under_limit(mem);
-       } while (!progress && --retry);
+       ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+       if (!ret)
+               mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
 
-       css_put(&mem->css);
-       if (!retry)
-               return -ENOMEM;
-       return 0;
+       return ret;
 }
 
 static DEFINE_MUTEX(set_limit_mutex);
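The only caller is shmem's charge-failure path: when mem_cgroup_cache_charge() fails with -ENOMEM, it retries through the fallback so reclaim and any OOM kill target the memcg that is actually over its limit. A sketch of that call site, assumed from mm/shmem.c of this era with error handling abbreviated:

/* mm/shmem.c (sketch): retry a failed shmem charge against the right memcg */
error = mem_cgroup_cache_charge(swappage, current->mm, gfp);
if (error == -ENOMEM) {
        /* reclaim from the proper memcg and raise its OOM if needed */
        error = mem_cgroup_shmem_charge_fallback(swappage, current->mm, gfp);
}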
@@ -1722,16 +1770,14 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
        return ret;
 }
 
-int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
-                               unsigned long long val)
+static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
+                                       unsigned long long val)
 {
        int retry_count;
        u64 memlimit, oldusage, curusage;
        int children = mem_cgroup_count_children(memcg);
        int ret = -EBUSY;
 
-       if (!do_swap_account)
-               return -EINVAL;
        /* see mem_cgroup_resize_res_limit */
        retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
        oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
@@ -1966,8 +2012,7 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
                val = res_counter_read_u64(&mem->res, name);
                break;
        case _MEMSWAP:
-               if (do_swap_account)
-                       val = res_counter_read_u64(&mem->memsw, name);
+               val = res_counter_read_u64(&mem->memsw, name);
                break;
        default:
                BUG();
@@ -2065,6 +2110,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 enum {
        MCS_CACHE,
        MCS_RSS,
+       MCS_MAPPED_FILE,
        MCS_PGPGIN,
        MCS_PGPGOUT,
        MCS_INACTIVE_ANON,
@@ -2085,6 +2131,7 @@ struct {
 } memcg_stat_strings[NR_MCS_STAT] = {
        {"cache", "total_cache"},
        {"rss", "total_rss"},
+       {"mapped_file", "total_mapped_file"},
        {"pgpgin", "total_pgpgin"},
        {"pgpgout", "total_pgpgout"},
        {"inactive_anon", "total_inactive_anon"},
@@ -2105,6 +2152,8 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
        s->stat[MCS_CACHE] += val * PAGE_SIZE;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
        s->stat[MCS_RSS] += val * PAGE_SIZE;
+       val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
+       s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
        s->stat[MCS_PGPGIN] += val;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
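With the string table wired up, mapped_file and total_mapped_file show up in memory.stat. A small userspace sketch that reads them back (hypothetical cgroup path; assumes the memory controller is mounted there):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/cgroups/memory/0/memory.stat", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "mapped_file", 11) ||
                    !strncmp(line, "total_mapped_file", 17))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}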