memcg: show memcg information during OOM
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/limits.h>
31 #include <linux/mutex.h>
32 #include <linux/slab.h>
33 #include <linux/swap.h>
34 #include <linux/spinlock.h>
35 #include <linux/fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/vmalloc.h>
38 #include <linux/mm_inline.h>
39 #include <linux/page_cgroup.h>
40 #include "internal.h"
41
42 #include <asm/uaccess.h>
43
44 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
45 #define MEM_CGROUP_RECLAIM_RETRIES      5
46
47 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
48 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
49 int do_swap_account __read_mostly;
50 static int really_do_swap_account __initdata = 1; /* to remember boot option */
51 #else
52 #define do_swap_account         (0)
53 #endif
54
55 static DEFINE_MUTEX(memcg_tasklist);    /* can be held under cgroup_mutex */
56
57 /*
58  * Statistics for memory cgroup.
59  */
60 enum mem_cgroup_stat_index {
61         /*
62          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
63          */
64         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
65         MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
66         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
67         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
68
69         MEM_CGROUP_STAT_NSTATS,
70 };
71
72 struct mem_cgroup_stat_cpu {
73         s64 count[MEM_CGROUP_STAT_NSTATS];
74 } ____cacheline_aligned_in_smp;
75
76 struct mem_cgroup_stat {
77         struct mem_cgroup_stat_cpu cpustat[0];
78 };
79
80 /*
81  * For accounting under irq disable, no need to increment the preempt count.
82  */
83 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
84                 enum mem_cgroup_stat_index idx, int val)
85 {
86         stat->count[idx] += val;
87 }
88
89 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
90                 enum mem_cgroup_stat_index idx)
91 {
92         int cpu;
93         s64 ret = 0;
94         for_each_possible_cpu(cpu)
95                 ret += stat->cpustat[cpu].count[idx];
96         return ret;
97 }
98
99 static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
100 {
101         s64 ret;
102
103         ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
104         ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
105         return ret;
106 }
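
/*
 * Illustrative sketch, not part of the original file: derived statistics
 * can be built the same way mem_cgroup_local_usage() is, by summing the
 * per-cpu counters with mem_cgroup_read_stat(). This hypothetical
 * (and unused) helper would report the total paging events of a group.
 */
static s64 mem_cgroup_total_events(struct mem_cgroup_stat *stat)
{
        return mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_PGPGIN_COUNT) +
               mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
}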
107
108 /*
109  * per-zone information in memory controller.
110  */
111 struct mem_cgroup_per_zone {
112         /*
113          * spin_lock to protect the per cgroup LRU
114          */
115         struct list_head        lists[NR_LRU_LISTS];
116         unsigned long           count[NR_LRU_LISTS];
117
118         struct zone_reclaim_stat reclaim_stat;
119 };
120 /* Macro for accessing counter */
121 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
122
123 struct mem_cgroup_per_node {
124         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
125 };
126
127 struct mem_cgroup_lru_info {
128         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
129 };
130
131 /*
132  * The memory controller data structure. The memory controller controls both
133  * page cache and RSS per cgroup. We would eventually like to provide
134  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
135  * to help the administrator determine what knobs to tune.
136  *
137  * TODO: Add a water mark for the memory controller. Reclaim will begin when
138  * we hit the water mark. Maybe even add a low water mark, such that
139  * no reclaim occurs from a cgroup at its low water mark; this is
140  * a feature that will be implemented much later in the future.
141  */
142 struct mem_cgroup {
143         struct cgroup_subsys_state css;
144         /*
145          * the counter to account for memory usage
146          */
147         struct res_counter res;
148         /*
149          * the counter to account for mem+swap usage.
150          */
151         struct res_counter memsw;
152         /*
153          * Per cgroup active and inactive list, similar to the
154          * per zone LRU lists.
155          */
156         struct mem_cgroup_lru_info info;
157
158         /*
159          * protect against reclaim related members.
160          */
161         spinlock_t reclaim_param_lock;
162
163         int     prev_priority;  /* for recording reclaim priority */
164
165         /*
166  * While reclaiming in a hierarchy, we cache the last child we
167          * reclaimed from.
168          */
169         int last_scanned_child;
170         /*
171          * Should the accounting and control be hierarchical, per subtree?
172          */
173         bool use_hierarchy;
174         unsigned long   last_oom_jiffies;
175         atomic_t        refcnt;
176
177         unsigned int    swappiness;
178
179         /*
180          * statistics. This must be placed at the end of memcg.
181          */
182         struct mem_cgroup_stat stat;
183 };
184
185 enum charge_type {
186         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
187         MEM_CGROUP_CHARGE_TYPE_MAPPED,
188         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
189         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
190         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
191         NR_CHARGE_TYPE,
192 };
193
194 /* only for use here (for easy reading) */
195 #define PCGF_CACHE      (1UL << PCG_CACHE)
196 #define PCGF_USED       (1UL << PCG_USED)
197 #define PCGF_LOCK       (1UL << PCG_LOCK)
198 static const unsigned long
199 pcg_default_flags[NR_CHARGE_TYPE] = {
200         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
201         PCGF_USED | PCGF_LOCK, /* Anon */
202         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
203         0, /* FORCE */
204 };
205
206 /* for encoding cft->private value on file */
207 #define _MEM                    (0)
208 #define _MEMSWAP                (1)
209 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
210 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
211 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
212
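/*
 * Worked example (illustrative): MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT)
 * packs the counter type into the upper 16 bits of cft->private and the
 * res_counter attribute into the lower 16; a file handler later recovers
 * them with MEMFILE_TYPE() == _MEMSWAP and MEMFILE_ATTR() == RES_LIMIT.
 */
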
213 static void mem_cgroup_get(struct mem_cgroup *mem);
214 static void mem_cgroup_put(struct mem_cgroup *mem);
215 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
216
217 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
218                                          struct page_cgroup *pc,
219                                          bool charge)
220 {
221         int val = (charge)? 1 : -1;
222         struct mem_cgroup_stat *stat = &mem->stat;
223         struct mem_cgroup_stat_cpu *cpustat;
224         int cpu = get_cpu();
225
226         cpustat = &stat->cpustat[cpu];
227         if (PageCgroupCache(pc))
228                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
229         else
230                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
231
232         if (charge)
233                 __mem_cgroup_stat_add_safe(cpustat,
234                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
235         else
236                 __mem_cgroup_stat_add_safe(cpustat,
237                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
238         put_cpu();
239 }
240
241 static struct mem_cgroup_per_zone *
242 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
243 {
244         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
245 }
246
247 static struct mem_cgroup_per_zone *
248 page_cgroup_zoneinfo(struct page_cgroup *pc)
249 {
250         struct mem_cgroup *mem = pc->mem_cgroup;
251         int nid = page_cgroup_nid(pc);
252         int zid = page_cgroup_zid(pc);
253
254         if (!mem)
255                 return NULL;
256
257         return mem_cgroup_zoneinfo(mem, nid, zid);
258 }
259
260 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
261                                         enum lru_list idx)
262 {
263         int nid, zid;
264         struct mem_cgroup_per_zone *mz;
265         u64 total = 0;
266
267         for_each_online_node(nid)
268                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
269                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
270                         total += MEM_CGROUP_ZSTAT(mz, idx);
271                 }
272         return total;
273 }
274
275 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
276 {
277         return container_of(cgroup_subsys_state(cont,
278                                 mem_cgroup_subsys_id), struct mem_cgroup,
279                                 css);
280 }
281
282 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
283 {
284         /*
285          * mm_update_next_owner() may clear mm->owner to NULL
286          * if it races with swapoff, page migration, etc.
287          * So this can be called with p == NULL.
288          */
289         if (unlikely(!p))
290                 return NULL;
291
292         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
293                                 struct mem_cgroup, css);
294 }
295
296 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
297 {
298         struct mem_cgroup *mem = NULL;
299
300         if (!mm)
301                 return NULL;
302         /*
303          * Because we have no locks, mm->owner may be moved to another
304          * cgroup. We use css_tryget() here even if this looks
305          * pessimistic (rather than adding locks here).
306          */
307         rcu_read_lock();
308         do {
309                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
310                 if (unlikely(!mem))
311                         break;
312         } while (!css_tryget(&mem->css));
313         rcu_read_unlock();
314         return mem;
315 }
316
317 static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
318 {
319         if (!mem)
320                 return true;
321         return css_is_removed(&mem->css);
322 }
323
324
325 /*
326  * Call the callback function against all cgroups under the hierarchy tree.
327  */
328 static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
329                           int (*func)(struct mem_cgroup *, void *))
330 {
331         int found, ret, nextid;
332         struct cgroup_subsys_state *css;
333         struct mem_cgroup *mem;
334
335         if (!root->use_hierarchy)
336                 return (*func)(root, data);
337
338         nextid = 1;
339         do {
340                 ret = 0;
341                 mem = NULL;
342
343                 rcu_read_lock();
344                 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
345                                    &found);
346                 if (css && css_tryget(css))
347                         mem = container_of(css, struct mem_cgroup, css);
348                 rcu_read_unlock();
349
350                 if (mem) {
351                         ret = (*func)(mem, data);
352                         css_put(&mem->css);
353                 }
354                 nextid = found + 1;
355         } while (!ret && css);
356
357         return ret;
358 }
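
/*
 * Illustrative usage sketch, not part of the original file: the callback
 * runs once per group, and a non-zero return value stops the walk early
 * (see the "while (!ret && css)" loop above). This hypothetical callback
 * would compute the maximum swappiness in a subtree when passed as
 * mem_cgroup_walk_tree(root, &max, mem_cgroup_max_swappiness_cb).
 */
static int mem_cgroup_max_swappiness_cb(struct mem_cgroup *mem, void *data)
{
        unsigned int *max = data;

        if (mem->swappiness > *max)
                *max = mem->swappiness;
        return 0;       /* keep walking; non-zero would abort the walk */
}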
359
360 /*
361  * The following LRU functions are allowed to be used without PCG_LOCK.
362  * Operations are called by routines of the global LRU independently of memcg.
363  * What we have to take care of here is the validity of pc->mem_cgroup.
364  *
365  * Changes to pc->mem_cgroup happens when
366  * 1. charge
367  * 2. moving account
368  * In the typical case, "charge" is done before add-to-lru. The exception is
369  * SwapCache, which is added to the LRU before charge.
370  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
371  * When moving account, the page is not on LRU. It's isolated.
372  */
373
374 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
375 {
376         struct page_cgroup *pc;
377         struct mem_cgroup *mem;
378         struct mem_cgroup_per_zone *mz;
379
380         if (mem_cgroup_disabled())
381                 return;
382         pc = lookup_page_cgroup(page);
383         /* can happen while we handle swapcache. */
384         if (list_empty(&pc->lru) || !pc->mem_cgroup)
385                 return;
386         /*
387          * We don't check PCG_USED bit. It's cleared when the "page" is finally
388          * removed from global LRU.
389          */
390         mz = page_cgroup_zoneinfo(pc);
391         mem = pc->mem_cgroup;
392         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
393         list_del_init(&pc->lru);
394         return;
395 }
396
397 void mem_cgroup_del_lru(struct page *page)
398 {
399         mem_cgroup_del_lru_list(page, page_lru(page));
400 }
401
402 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
403 {
404         struct mem_cgroup_per_zone *mz;
405         struct page_cgroup *pc;
406
407         if (mem_cgroup_disabled())
408                 return;
409
410         pc = lookup_page_cgroup(page);
411         /*
412          * Used bit is set without atomic ops but after smp_wmb().
413          * For making pc->mem_cgroup visible, insert smp_rmb() here.
414          */
415         smp_rmb();
416         /* unused page is not rotated. */
417         if (!PageCgroupUsed(pc))
418                 return;
419         mz = page_cgroup_zoneinfo(pc);
420         list_move(&pc->lru, &mz->lists[lru]);
421 }
422
423 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
424 {
425         struct page_cgroup *pc;
426         struct mem_cgroup_per_zone *mz;
427
428         if (mem_cgroup_disabled())
429                 return;
430         pc = lookup_page_cgroup(page);
431         /*
432          * Used bit is set without atomic ops but after smp_wmb().
433          * For making pc->mem_cgroup visible, insert smp_rmb() here.
434          */
435         smp_rmb();
436         if (!PageCgroupUsed(pc))
437                 return;
438
439         mz = page_cgroup_zoneinfo(pc);
440         MEM_CGROUP_ZSTAT(mz, lru) += 1;
441         list_add(&pc->lru, &mz->lists[lru]);
442 }
443
444 /*
445  * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
446  * the LRU because the page may be reused after it's fully uncharged (because
447  * of SwapCache behavior). To handle that, unlink the page_cgroup from the LRU
448  * when charging it again. This function is only used to charge SwapCache. It's
449  * done under lock_page and it's expected that zone->lru_lock is never held.
450  */
451 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
452 {
453         unsigned long flags;
454         struct zone *zone = page_zone(page);
455         struct page_cgroup *pc = lookup_page_cgroup(page);
456
457         spin_lock_irqsave(&zone->lru_lock, flags);
458         /*
459          * Forget old LRU when this page_cgroup is *not* used. This Used bit
460          * is guarded by lock_page() because the page is SwapCache.
461          */
462         if (!PageCgroupUsed(pc))
463                 mem_cgroup_del_lru_list(page, page_lru(page));
464         spin_unlock_irqrestore(&zone->lru_lock, flags);
465 }
466
467 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
468 {
469         unsigned long flags;
470         struct zone *zone = page_zone(page);
471         struct page_cgroup *pc = lookup_page_cgroup(page);
472
473         spin_lock_irqsave(&zone->lru_lock, flags);
474         /* link when the page is linked to LRU but page_cgroup isn't */
475         if (PageLRU(page) && list_empty(&pc->lru))
476                 mem_cgroup_add_lru_list(page, page_lru(page));
477         spin_unlock_irqrestore(&zone->lru_lock, flags);
478 }
479
480
481 void mem_cgroup_move_lists(struct page *page,
482                            enum lru_list from, enum lru_list to)
483 {
484         if (mem_cgroup_disabled())
485                 return;
486         mem_cgroup_del_lru_list(page, from);
487         mem_cgroup_add_lru_list(page, to);
488 }
489
490 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
491 {
492         int ret;
493         struct mem_cgroup *curr = NULL;
494
495         task_lock(task);
496         rcu_read_lock();
497         curr = try_get_mem_cgroup_from_mm(task->mm);
498         rcu_read_unlock();
499         task_unlock(task);
500         if (!curr)
501                 return 0;
502         if (curr->use_hierarchy)
503                 ret = css_is_ancestor(&curr->css, &mem->css);
504         else
505                 ret = (curr == mem);
506         css_put(&curr->css);
507         return ret;
508 }
509
510 /*
511  * Calculate mapped_ratio under the memory controller. This will be used in
512  * vmscan.c for determining whether we have to reclaim mapped pages.
513  */
514 int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
515 {
516         long total, rss;
517
518         /*
519          * usage is recorded in bytes. But, here, we assume the number of
520          * physical pages can be represented by "long" on any arch.
521          */
522         total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
523         rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
524         return (int)((rss * 100L) / total);
525 }
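
/*
 * Worked example (illustrative numbers): with res.usage at 400MB and
 * 4kB pages, total = 102400 + 1 pages (the +1 avoids a zero divisor);
 * if the RSS counter reads 25600 pages, the mapped ratio reported is
 * (25600 * 100) / 102401 = 24(%).
 */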
526
527 /*
528  * prev_priority control... this will be used in the memory reclaim path.
529  */
530 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
531 {
532         int prev_priority;
533
534         spin_lock(&mem->reclaim_param_lock);
535         prev_priority = mem->prev_priority;
536         spin_unlock(&mem->reclaim_param_lock);
537
538         return prev_priority;
539 }
540
541 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
542 {
543         spin_lock(&mem->reclaim_param_lock);
544         if (priority < mem->prev_priority)
545                 mem->prev_priority = priority;
546         spin_unlock(&mem->reclaim_param_lock);
547 }
548
549 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
550 {
551         spin_lock(&mem->reclaim_param_lock);
552         mem->prev_priority = priority;
553         spin_unlock(&mem->reclaim_param_lock);
554 }
555
556 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
557 {
558         unsigned long active;
559         unsigned long inactive;
560         unsigned long gb;
561         unsigned long inactive_ratio;
562
563         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
564         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
565
566         gb = (inactive + active) >> (30 - PAGE_SHIFT);
567         if (gb)
568                 inactive_ratio = int_sqrt(10 * gb);
569         else
570                 inactive_ratio = 1;
571
572         if (present_pages) {
573                 present_pages[0] = inactive;
574                 present_pages[1] = active;
575         }
576
577         return inactive_ratio;
578 }
579
580 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
581 {
582         unsigned long active;
583         unsigned long inactive;
584         unsigned long present_pages[2];
585         unsigned long inactive_ratio;
586
587         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
588
589         inactive = present_pages[0];
590         active = present_pages[1];
591
592         if (inactive * inactive_ratio < active)
593                 return 1;
594
595         return 0;
596 }
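
/*
 * Worked example (illustrative, assuming 4kB pages and the contemporary
 * enum lru_list layout): with 4GB of anon pages in the group, gb = 4 and
 * inactive_ratio = int_sqrt(40) = 6, so the inactive anon list is
 * considered low while inactive * 6 < active. Below 1GB, gb = 0 and the
 * ratio falls back to 1 (a plain inactive < active test).
 */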
597
598 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
599                                        struct zone *zone,
600                                        enum lru_list lru)
601 {
602         int nid = zone->zone_pgdat->node_id;
603         int zid = zone_idx(zone);
604         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
605
606         return MEM_CGROUP_ZSTAT(mz, lru);
607 }
608
609 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
610                                                       struct zone *zone)
611 {
612         int nid = zone->zone_pgdat->node_id;
613         int zid = zone_idx(zone);
614         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
615
616         return &mz->reclaim_stat;
617 }
618
619 struct zone_reclaim_stat *
620 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
621 {
622         struct page_cgroup *pc;
623         struct mem_cgroup_per_zone *mz;
624
625         if (mem_cgroup_disabled())
626                 return NULL;
627
628         pc = lookup_page_cgroup(page);
629         /*
630          * Used bit is set without atomic ops but after smp_wmb().
631          * For making pc->mem_cgroup visible, insert smp_rmb() here.
632          */
633         smp_rmb();
634         if (!PageCgroupUsed(pc))
635                 return NULL;
636
637         mz = page_cgroup_zoneinfo(pc);
638         if (!mz)
639                 return NULL;
640
641         return &mz->reclaim_stat;
642 }
643
644 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
645                                         struct list_head *dst,
646                                         unsigned long *scanned, int order,
647                                         int mode, struct zone *z,
648                                         struct mem_cgroup *mem_cont,
649                                         int active, int file)
650 {
651         unsigned long nr_taken = 0;
652         struct page *page;
653         unsigned long scan;
654         LIST_HEAD(pc_list);
655         struct list_head *src;
656         struct page_cgroup *pc, *tmp;
657         int nid = z->zone_pgdat->node_id;
658         int zid = zone_idx(z);
659         struct mem_cgroup_per_zone *mz;
660         int lru = LRU_FILE * !!file + !!active;
661
662         BUG_ON(!mem_cont);
663         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
664         src = &mz->lists[lru];
665
666         scan = 0;
667         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
668                 if (scan >= nr_to_scan)
669                         break;
670
671                 page = pc->page;
672                 if (unlikely(!PageCgroupUsed(pc)))
673                         continue;
674                 if (unlikely(!PageLRU(page)))
675                         continue;
676
677                 scan++;
678                 if (__isolate_lru_page(page, mode, file) == 0) {
679                         list_move(&page->lru, dst);
680                         nr_taken++;
681                 }
682         }
683
684         *scanned = scan;
685         return nr_taken;
686 }
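
/*
 * Note on the lru index computed above (illustrative, assuming the
 * contemporary enum lru_list layout): "LRU_FILE * !!file + !!active"
 * maps (file=1, active=0) to LRU_INACTIVE_FILE and (file=0, active=1)
 * to LRU_ACTIVE_ANON, covering the four anon/file LRU lists.
 */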
687
688 #define mem_cgroup_from_res_counter(counter, member)    \
689         container_of(counter, struct mem_cgroup, member)
690
691 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
692 {
693         if (do_swap_account) {
694                 if (res_counter_check_under_limit(&mem->res) &&
695                         res_counter_check_under_limit(&mem->memsw))
696                         return true;
697         } else
698                 if (res_counter_check_under_limit(&mem->res))
699                         return true;
700         return false;
701 }
702
703 static unsigned int get_swappiness(struct mem_cgroup *memcg)
704 {
705         struct cgroup *cgrp = memcg->css.cgroup;
706         unsigned int swappiness;
707
708         /* root ? */
709         if (cgrp->parent == NULL)
710                 return vm_swappiness;
711
712         spin_lock(&memcg->reclaim_param_lock);
713         swappiness = memcg->swappiness;
714         spin_unlock(&memcg->reclaim_param_lock);
715
716         return swappiness;
717 }
718
719 static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
720 {
721         int *val = data;
722         (*val)++;
723         return 0;
724 }
725
726 /**
727  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
728  * @memcg: The memory cgroup that went over limit
729  * @p: Task that is going to be killed
730  *
731  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
732  * enabled
733  */
734 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
735 {
736         struct cgroup *task_cgrp;
737         struct cgroup *mem_cgrp;
738         /*
739          * Need a buffer in BSS, can't rely on allocations. The code relies
740          * on the assumption that OOM is serialized for the memory controller.
741          * If this assumption is broken, revisit this code.
742          */
743         static char memcg_name[PATH_MAX];
744         int ret;
745
746         if (!memcg)
747                 return;
748
749
750         rcu_read_lock();
751
752         mem_cgrp = memcg->css.cgroup;
753         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
754
755         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
756         if (ret < 0) {
757                 /*
758                  * Unfortunately, we are unable to convert to a useful name,
759                  * but we'll still print out the usage information.
760                  */
761                 rcu_read_unlock();
762                 goto done;
763         }
764         rcu_read_unlock();
765
766         printk(KERN_INFO "Task in %s killed", memcg_name);
767
768         rcu_read_lock();
769         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
770         if (ret < 0) {
771                 rcu_read_unlock();
772                 goto done;
773         }
774         rcu_read_unlock();
775
776         /*
777          * Continues from above, so we don't need a KERN_ level.
778          */
779         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
780 done:
781
782         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
783                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
784                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
785                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
786         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
787                 "failcnt %llu\n",
788                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
789                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
790                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
791 }
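
/*
 * Sample output assembled from the printk formats above (cgroup names
 * and numbers are made up for illustration):
 *
 *   Task in /aa/ab killed as a result of limit of /aa
 *   memory: usage 1048576kB, limit 1048576kB, failcnt 4183
 *   memory+swap: usage 1400964kB, limit 9007199254740991kB, failcnt 0
 */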
792
793 /*
794  * This function returns the number of memcgs under the hierarchy tree. Returns
795  * 1 (self count) if there are no children.
796  */
797 static int mem_cgroup_count_children(struct mem_cgroup *mem)
798 {
799         int num = 0;
800         mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
801         return num;
802 }
803
804 /*
805  * Visit the first child (need not be the first child as per the ordering
806  * of the cgroup list, since we track last_scanned_child) of @mem and use
807  * that to reclaim free pages from.
808  */
809 static struct mem_cgroup *
810 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
811 {
812         struct mem_cgroup *ret = NULL;
813         struct cgroup_subsys_state *css;
814         int nextid, found;
815
816         if (!root_mem->use_hierarchy) {
817                 css_get(&root_mem->css);
818                 ret = root_mem;
819         }
820
821         while (!ret) {
822                 rcu_read_lock();
823                 nextid = root_mem->last_scanned_child + 1;
824                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
825                                    &found);
826                 if (css && css_tryget(css))
827                         ret = container_of(css, struct mem_cgroup, css);
828
829                 rcu_read_unlock();
830                 /* Updates scanning parameter */
831                 spin_lock(&root_mem->reclaim_param_lock);
832                 if (!css) {
833                         /* this means start scan from ID:1 */
834                         root_mem->last_scanned_child = 0;
835                 } else
836                         root_mem->last_scanned_child = found;
837                 spin_unlock(&root_mem->reclaim_param_lock);
838         }
839
840         return ret;
841 }
842
843 /*
844  * Scan the hierarchy if needed to reclaim memory. We remember the last child
845  * we reclaimed from, so that we don't end up penalizing one child extensively
846  * based on its position in the children list.
847  *
848  * root_mem is the original ancestor that we've been reclaiming from.
849  *
850  * We give up and return to the caller when we visit root_mem twice.
851  * (other groups can be removed while we're walking....)
852  *
853  * If shrink==true, then to avoid freeing too much, this returns immediately.
854  */
855 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
856                                    gfp_t gfp_mask, bool noswap, bool shrink)
857 {
858         struct mem_cgroup *victim;
859         int ret, total = 0;
860         int loop = 0;
861
862         while (loop < 2) {
863                 victim = mem_cgroup_select_victim(root_mem);
864                 if (victim == root_mem)
865                         loop++;
866                 if (!mem_cgroup_local_usage(&victim->stat)) {
867                         /* this cgroup's local usage == 0 */
868                         css_put(&victim->css);
869                         continue;
870                 }
871                 /* we use swappiness of local cgroup */
872                 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
873                                                    get_swappiness(victim));
874                 css_put(&victim->css);
875                 /*
876                  * When shrinking usage, we can't check whether to stop here
877                  * or reclaim more; that depends on the caller. last_scanned_child
878                  * will work well enough to keep fairness under the tree.
879                  */
880                 if (shrink)
881                         return ret;
882                 total += ret;
883                 if (mem_cgroup_check_under_limit(root_mem))
884                         return 1 + total;
885         }
886         return total;
887 }
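
/*
 * Walkthrough (illustrative): with root R and children A and B,
 * mem_cgroup_select_victim() hands back A, B, R, A, B, R, ... in turn
 * via last_scanned_child. "loop" only advances when R itself comes up,
 * so every group is tried roughly twice before the function gives up,
 * even if groups are removed while we walk.
 */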
888
889 bool mem_cgroup_oom_called(struct task_struct *task)
890 {
891         bool ret = false;
892         struct mem_cgroup *mem;
893         struct mm_struct *mm;
894
895         rcu_read_lock();
896         mm = task->mm;
897         if (!mm)
898                 mm = &init_mm;
899         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
900         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
901                 ret = true;
902         rcu_read_unlock();
903         return ret;
904 }
905
906 static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
907 {
908         mem->last_oom_jiffies = jiffies;
909         return 0;
910 }
911
912 static void record_last_oom(struct mem_cgroup *mem)
913 {
914         mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
915 }
916
917
918 /*
919  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
920  * the oom-killer can be invoked.
921  */
922 static int __mem_cgroup_try_charge(struct mm_struct *mm,
923                         gfp_t gfp_mask, struct mem_cgroup **memcg,
924                         bool oom)
925 {
926         struct mem_cgroup *mem, *mem_over_limit;
927         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
928         struct res_counter *fail_res;
929
930         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
931                 /* Don't account this! */
932                 *memcg = NULL;
933                 return 0;
934         }
935
936         /*
937          * We always charge the cgroup the mm_struct belongs to.
938          * The mm_struct's mem_cgroup changes on task migration if the
939          * thread group leader migrates. It's possible that mm is not
940          * set; if so, charge the init_mm (happens for pagecache usage).
941          */
942         mem = *memcg;
943         if (likely(!mem)) {
944                 mem = try_get_mem_cgroup_from_mm(mm);
945                 *memcg = mem;
946         } else {
947                 css_get(&mem->css);
948         }
949         if (unlikely(!mem))
950                 return 0;
951
952         VM_BUG_ON(mem_cgroup_is_obsolete(mem));
953
954         while (1) {
955                 int ret;
956                 bool noswap = false;
957
958                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
959                 if (likely(!ret)) {
960                         if (!do_swap_account)
961                                 break;
962                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
963                                                         &fail_res);
964                         if (likely(!ret))
965                                 break;
966                         /* mem+swap counter fails */
967                         res_counter_uncharge(&mem->res, PAGE_SIZE);
968                         noswap = true;
969                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
970                                                                         memsw);
971                 } else
972                         /* mem counter fails */
973                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
974                                                                         res);
975
976                 if (!(gfp_mask & __GFP_WAIT))
977                         goto nomem;
978
979                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
980                                                         noswap, false);
981                 if (ret)
982                         continue;
983
984                 /*
985                  * try_to_free_mem_cgroup_pages() might not give us a full
986                  * picture of reclaim. Some pages are reclaimed and might be
987                  * moved to swap cache or just unmapped from the cgroup.
988                  * Check the limit again to see if the reclaim reduced the
989                  * current usage of the cgroup before giving up.
990                  *
991                  */
992                 if (mem_cgroup_check_under_limit(mem_over_limit))
993                         continue;
994
995                 if (!nr_retries--) {
996                         if (oom) {
997                                 mutex_lock(&memcg_tasklist);
998                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
999                                 mutex_unlock(&memcg_tasklist);
1000                                 record_last_oom(mem_over_limit);
1001                         }
1002                         goto nomem;
1003                 }
1004         }
1005         return 0;
1006 nomem:
1007         css_put(&mem->css);
1008         return -ENOMEM;
1009 }
1010
1011 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
1012 {
1013         struct mem_cgroup *mem;
1014         swp_entry_t ent;
1015
1016         if (!PageSwapCache(page))
1017                 return NULL;
1018
1019         ent.val = page_private(page);
1020         mem = lookup_swap_cgroup(ent);
1021         if (!mem)
1022                 return NULL;
1023         if (!css_tryget(&mem->css))
1024                 return NULL;
1025         return mem;
1026 }
1027
1028 /*
1029  * Commit a charge obtained by __mem_cgroup_try_charge() and make the
1030  * page_cgroup USED. If it is already USED, uncharge and return.
1031  */
1032
1033 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1034                                      struct page_cgroup *pc,
1035                                      enum charge_type ctype)
1036 {
1037         /* try_charge() can return NULL to *memcg; handle that case here. */
1038         if (!mem)
1039                 return;
1040
1041         lock_page_cgroup(pc);
1042         if (unlikely(PageCgroupUsed(pc))) {
1043                 unlock_page_cgroup(pc);
1044                 res_counter_uncharge(&mem->res, PAGE_SIZE);
1045                 if (do_swap_account)
1046                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1047                 css_put(&mem->css);
1048                 return;
1049         }
1050         pc->mem_cgroup = mem;
1051         smp_wmb();
1052         pc->flags = pcg_default_flags[ctype];
1053
1054         mem_cgroup_charge_statistics(mem, pc, true);
1055
1056         unlock_page_cgroup(pc);
1057 }
1058
1059 /**
1060  * mem_cgroup_move_account - move account of the page
1061  * @pc: page_cgroup of the page.
1062  * @from: mem_cgroup which the page is moved from.
1063  * @to: mem_cgroup which the page is moved to. @from != @to.
1064  *
1065  * The caller must confirm the following:
1066  * - the page is not on the LRU (isolate_page() is useful.)
1067  *
1068  * Returns 0 on success,
1069  * returns -EBUSY when the lock is busy or "pc" is unstable.
1070  *
1071  * This function does "uncharge" from old cgroup but doesn't do "charge" to
1072  * new cgroup. It should be done by a caller.
1073  */
1074
1075 static int mem_cgroup_move_account(struct page_cgroup *pc,
1076         struct mem_cgroup *from, struct mem_cgroup *to)
1077 {
1078         struct mem_cgroup_per_zone *from_mz, *to_mz;
1079         int nid, zid;
1080         int ret = -EBUSY;
1081
1082         VM_BUG_ON(from == to);
1083         VM_BUG_ON(PageLRU(pc->page));
1084
1085         nid = page_cgroup_nid(pc);
1086         zid = page_cgroup_zid(pc);
1087         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
1088         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
1089
1090         if (!trylock_page_cgroup(pc))
1091                 return ret;
1092
1093         if (!PageCgroupUsed(pc))
1094                 goto out;
1095
1096         if (pc->mem_cgroup != from)
1097                 goto out;
1098
1099         res_counter_uncharge(&from->res, PAGE_SIZE);
1100         mem_cgroup_charge_statistics(from, pc, false);
1101         if (do_swap_account)
1102                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
1103         css_put(&from->css);
1104
1105         css_get(&to->css);
1106         pc->mem_cgroup = to;
1107         mem_cgroup_charge_statistics(to, pc, true);
1108         ret = 0;
1109 out:
1110         unlock_page_cgroup(pc);
1111         return ret;
1112 }
1113
1114 /*
1115  * Move charges to the parent.
1116  */
1117
1118 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1119                                   struct mem_cgroup *child,
1120                                   gfp_t gfp_mask)
1121 {
1122         struct page *page = pc->page;
1123         struct cgroup *cg = child->css.cgroup;
1124         struct cgroup *pcg = cg->parent;
1125         struct mem_cgroup *parent;
1126         int ret;
1127
1128         /* Is ROOT ? */
1129         if (!pcg)
1130                 return -EINVAL;
1131
1132
1133         parent = mem_cgroup_from_cont(pcg);
1134
1135
1136         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1137         if (ret || !parent)
1138                 return ret;
1139
1140         if (!get_page_unless_zero(page)) {
1141                 ret = -EBUSY;
1142                 goto uncharge;
1143         }
1144
1145         ret = isolate_lru_page(page);
1146
1147         if (ret)
1148                 goto cancel;
1149
1150         ret = mem_cgroup_move_account(pc, child, parent);
1151
1152         putback_lru_page(page);
1153         if (!ret) {
1154                 put_page(page);
1155                 /* drop extra refcnt by try_charge() */
1156                 css_put(&parent->css);
1157                 return 0;
1158         }
1159
1160 cancel:
1161         put_page(page);
1162 uncharge:
1163         /* drop extra refcnt by try_charge() */
1164         css_put(&parent->css);
1165         /* uncharge if move fails */
1166         res_counter_uncharge(&parent->res, PAGE_SIZE);
1167         if (do_swap_account)
1168                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1169         return ret;
1170 }
1171
1172 /*
1173  * Charge the memory controller for page usage.
1174  * Return
1175  * 0 if the charge was successful
1176  * < 0 if the cgroup is over its limit
1177  */
1178 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1179                                 gfp_t gfp_mask, enum charge_type ctype,
1180                                 struct mem_cgroup *memcg)
1181 {
1182         struct mem_cgroup *mem;
1183         struct page_cgroup *pc;
1184         int ret;
1185
1186         pc = lookup_page_cgroup(page);
1187         /* can happen at boot */
1188         if (unlikely(!pc))
1189                 return 0;
1190         prefetchw(pc);
1191
1192         mem = memcg;
1193         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1194         if (ret || !mem)
1195                 return ret;
1196
1197         __mem_cgroup_commit_charge(mem, pc, ctype);
1198         return 0;
1199 }
1200
1201 int mem_cgroup_newpage_charge(struct page *page,
1202                               struct mm_struct *mm, gfp_t gfp_mask)
1203 {
1204         if (mem_cgroup_disabled())
1205                 return 0;
1206         if (PageCompound(page))
1207                 return 0;
1208         /*
1209          * If already mapped, we don't have to account.
1210          * If page cache, page->mapping has address_space.
1211          * But page->mapping may hold an out-of-use anon_vma pointer;
1212          * detect it by a PageAnon() check. A newly-mapped-anon page's
1213          * page->mapping is NULL.
1214          */
1215         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1216                 return 0;
1217         if (unlikely(!mm))
1218                 mm = &init_mm;
1219         return mem_cgroup_charge_common(page, mm, gfp_mask,
1220                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1221 }
1222
1223 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1224                                 gfp_t gfp_mask)
1225 {
1226         struct mem_cgroup *mem = NULL;
1227         int ret;
1228
1229         if (mem_cgroup_disabled())
1230                 return 0;
1231         if (PageCompound(page))
1232                 return 0;
1233         /*
1234          * Corner case handling. This is usually called from
1235          * add_to_page_cache(). But some FS (shmem) precharges this page
1236          * before calling it and calls add_to_page_cache() with GFP_NOWAIT.
1237          *
1238          * For the GFP_NOWAIT case, the page may be pre-charged before calling
1239          * add_to_page_cache(). (See shmem.c.) Check it here and avoid calling
1240          * charge twice. (It works but has to pay a bit larger cost.)
1241          * And when the page is SwapCache, it should take swap information
1242          * into account. This is under lock_page() now.
1243          */
1244         if (!(gfp_mask & __GFP_WAIT)) {
1245                 struct page_cgroup *pc;
1246
1247
1248                 pc = lookup_page_cgroup(page);
1249                 if (!pc)
1250                         return 0;
1251                 lock_page_cgroup(pc);
1252                 if (PageCgroupUsed(pc)) {
1253                         unlock_page_cgroup(pc);
1254                         return 0;
1255                 }
1256                 unlock_page_cgroup(pc);
1257         }
1258
1259         if (do_swap_account && PageSwapCache(page)) {
1260                 mem = try_get_mem_cgroup_from_swapcache(page);
1261                 if (mem)
1262                         mm = NULL;
1263                 else
1264                         mem = NULL;
1265                 /* SwapCache may still be linked to the LRU now. */
1266                 mem_cgroup_lru_del_before_commit_swapcache(page);
1267         }
1268
1269         if (unlikely(!mm && !mem))
1270                 mm = &init_mm;
1271
1272         if (page_is_file_cache(page))
1273                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1274                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1275
1276         ret = mem_cgroup_charge_common(page, mm, gfp_mask,
1277                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1278         if (mem)
1279                 css_put(&mem->css);
1280         if (PageSwapCache(page))
1281                 mem_cgroup_lru_add_after_commit_swapcache(page);
1282
1283         if (do_swap_account && !ret && PageSwapCache(page)) {
1284                 swp_entry_t ent = {.val = page_private(page)};
1285                 /* avoid double counting */
1286                 mem = swap_cgroup_record(ent, NULL);
1287                 if (mem) {
1288                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1289                         mem_cgroup_put(mem);
1290                 }
1291         }
1292         return ret;
1293 }
1294
1295 /*
1296  * During swap-in (try_charge -> commit or cancel), the page is locked.
1297  * And when try_charge() successfully returns, one refcnt to the memcg without
1298  * a struct page_cgroup is acquired. This refcnt will be consumed by
1299  * "commit()" or removed by "cancel()".
1300  */
1301 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1302                                  struct page *page,
1303                                  gfp_t mask, struct mem_cgroup **ptr)
1304 {
1305         struct mem_cgroup *mem;
1306         int ret;
1307
1308         if (mem_cgroup_disabled())
1309                 return 0;
1310
1311         if (!do_swap_account)
1312                 goto charge_cur_mm;
1313         /*
1314          * A racing thread's fault, or swapoff, may have already updated
1315          * the pte, and even removed page from swap cache: return success
1316          * to go on to do_swap_page()'s pte_same() test, which should fail.
1317          */
1318         if (!PageSwapCache(page))
1319                 return 0;
1320         mem = try_get_mem_cgroup_from_swapcache(page);
1321         if (!mem)
1322                 goto charge_cur_mm;
1323         *ptr = mem;
1324         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
1325         /* drop extra refcnt from tryget */
1326         css_put(&mem->css);
1327         return ret;
1328 charge_cur_mm:
1329         if (unlikely(!mm))
1330                 mm = &init_mm;
1331         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1332 }
1333
1334 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1335 {
1336         struct page_cgroup *pc;
1337
1338         if (mem_cgroup_disabled())
1339                 return;
1340         if (!ptr)
1341                 return;
1342         pc = lookup_page_cgroup(page);
1343         mem_cgroup_lru_del_before_commit_swapcache(page);
1344         __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1345         mem_cgroup_lru_add_after_commit_swapcache(page);
1346         /*
1347          * Now the swap is in memory. This means this page may be
1348          * counted both as mem and swap... a double count.
1349          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
1350          * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
1351          * may call delete_from_swap_cache() before we reach here.
1352          */
1353         if (do_swap_account && PageSwapCache(page)) {
1354                 swp_entry_t ent = {.val = page_private(page)};
1355                 struct mem_cgroup *memcg;
1356                 memcg = swap_cgroup_record(ent, NULL);
1357                 if (memcg) {
1358                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1359                         mem_cgroup_put(memcg);
1360                 }
1361
1362         }
1363         /* add this page (page_cgroup) to the LRU we want. */
1364
1365 }
1366
1367 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1368 {
1369         if (mem_cgroup_disabled())
1370                 return;
1371         if (!mem)
1372                 return;
1373         res_counter_uncharge(&mem->res, PAGE_SIZE);
1374         if (do_swap_account)
1375                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1376         css_put(&mem->css);
1377 }
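
/*
 * Illustrative caller sequence (a condensed sketch of the swap-in path
 * in do_swap_page(), mm/memory.c; locking and error handling trimmed):
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out_fail;			// charge failed
 *	...
 *	if (pte_installed_successfully)		// hypothetical condition
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);	// lost a race
 */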
1378
1379
1380 /*
1381  * uncharge if !page_mapped(page)
1382  */
1383 static struct mem_cgroup *
1384 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1385 {
1386         struct page_cgroup *pc;
1387         struct mem_cgroup *mem = NULL;
1388         struct mem_cgroup_per_zone *mz;
1389
1390         if (mem_cgroup_disabled())
1391                 return NULL;
1392
1393         if (PageSwapCache(page))
1394                 return NULL;
1395
1396         /*
1397          * Check if our page_cgroup is valid
1398          */
1399         pc = lookup_page_cgroup(page);
1400         if (unlikely(!pc || !PageCgroupUsed(pc)))
1401                 return NULL;
1402
1403         lock_page_cgroup(pc);
1404
1405         mem = pc->mem_cgroup;
1406
1407         if (!PageCgroupUsed(pc))
1408                 goto unlock_out;
1409
1410         switch (ctype) {
1411         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1412                 if (page_mapped(page))
1413                         goto unlock_out;
1414                 break;
1415         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1416                 if (!PageAnon(page)) {  /* Shared memory */
1417                         if (page->mapping && !page_is_file_cache(page))
1418                                 goto unlock_out;
1419                 } else if (page_mapped(page)) /* Anon */
1420                                 goto unlock_out;
1421                 break;
1422         default:
1423                 break;
1424         }
1425
1426         res_counter_uncharge(&mem->res, PAGE_SIZE);
1427         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1428                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1429         mem_cgroup_charge_statistics(mem, pc, false);
1430
1431         ClearPageCgroupUsed(pc);
1432         /*
1433          * pc->mem_cgroup is not cleared here. It will be accessed when it's
1434          * freed from LRU. This is safe because an uncharged page is expected not
1435          * to be reused (freed soon). The exception is SwapCache, which is handled by
1436          * special functions.
1437          */
1438
1439         mz = page_cgroup_zoneinfo(pc);
1440         unlock_page_cgroup(pc);
1441
1442         /* at swapout, this memcg will be accessed to record to swap */
1443         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1444                 css_put(&mem->css);
1445
1446         return mem;
1447
1448 unlock_out:
1449         unlock_page_cgroup(pc);
1450         return NULL;
1451 }
1452
1453 void mem_cgroup_uncharge_page(struct page *page)
1454 {
1455         /* early check. */
1456         if (page_mapped(page))
1457                 return;
1458         if (page->mapping && !PageAnon(page))
1459                 return;
1460         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1461 }
1462
1463 void mem_cgroup_uncharge_cache_page(struct page *page)
1464 {
1465         VM_BUG_ON(page_mapped(page));
1466         VM_BUG_ON(page->mapping);
1467         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1468 }
1469
1470 /*
1471  * Called from __delete_from_swap_cache() to drop the "page" account.
1472  * The memcg information is recorded in the swap_cgroup of "ent".
1473  */
1474 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1475 {
1476         struct mem_cgroup *memcg;
1477
1478         memcg = __mem_cgroup_uncharge_common(page,
1479                                         MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1480         /* record memcg information */
1481         if (do_swap_account && memcg) {
1482                 swap_cgroup_record(ent, memcg);
1483                 mem_cgroup_get(memcg);
1484         }
1485         if (memcg)
1486                 css_put(&memcg->css);
1487 }
1488
1489 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1490 /*
1491  * Called from swap_entry_free(). Remove the record in swap_cgroup and
1492  * uncharge the "memsw" account.
1493  */
1494 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1495 {
1496         struct mem_cgroup *memcg;
1497
1498         if (!do_swap_account)
1499                 return;
1500
1501         memcg = swap_cgroup_record(ent, NULL);
1502         if (memcg) {
1503                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1504                 mem_cgroup_put(memcg);
1505         }
1506 }
1507 #endif
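
/*
 * Lifecycle note (summarizing the two functions above): at swapout,
 * mem_cgroup_uncharge_swapcache() records the memcg in the swap_cgroup
 * of "ent" and takes a reference with mem_cgroup_get(); when the swap
 * entry is finally freed, mem_cgroup_uncharge_swap() clears that record,
 * uncharges "memsw", and drops the reference with mem_cgroup_put().
 */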
1508
1509 /*
1510  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
1511  * page belongs to.
1512  */
1513 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1514 {
1515         struct page_cgroup *pc;
1516         struct mem_cgroup *mem = NULL;
1517         int ret = 0;
1518
1519         if (mem_cgroup_disabled())
1520                 return 0;
1521
1522         pc = lookup_page_cgroup(page);
1523         lock_page_cgroup(pc);
1524         if (PageCgroupUsed(pc)) {
1525                 mem = pc->mem_cgroup;
1526                 css_get(&mem->css);
1527         }
1528         unlock_page_cgroup(pc);
1529
1530         if (mem) {
1531                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
1532                 css_put(&mem->css);
1533         }
1534         *ptr = mem;
1535         return ret;
1536 }
1537
1538 /* remove redundant charge if migration failed */
1539 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1540                 struct page *oldpage, struct page *newpage)
1541 {
1542         struct page *target, *unused;
1543         struct page_cgroup *pc;
1544         enum charge_type ctype;
1545
1546         if (!mem)
1547                 return;
1548
1549         /* at migration success, oldpage->mapping is NULL. */
1550         if (oldpage->mapping) {
1551                 target = oldpage;
1552                 unused = NULL;
1553         } else {
1554                 target = newpage;
1555                 unused = oldpage;
1556         }
1557
1558         if (PageAnon(target))
1559                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1560         else if (page_is_file_cache(target))
1561                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1562         else
1563                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1564
1565         /* unused page is not on radix-tree now. */
1566         if (unused)
1567                 __mem_cgroup_uncharge_common(unused, ctype);
1568
1569         pc = lookup_page_cgroup(target);
1570         /*
1571          * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
1572          * So, double-counting is effectively avoided.
1573          */
1574         __mem_cgroup_commit_charge(mem, pc, ctype);
1575
1576         /*
1577          * Both oldpage and newpage are still under lock_page().
1578          * Then, we don't have to care about races in the radix-tree.
1579          * But we have to be careful about whether this page is mapped or not.
1580          *
1581          * There is a case for !page_mapped(). At the start of
1582          * migration, oldpage was mapped. But now, it's zapped.
1583          * But we know *target* page is not freed/reused under us.
1584          * mem_cgroup_uncharge_page() does all necessary checks.
1585          */
1586         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1587                 mem_cgroup_uncharge_page(target);
1588 }
1589
1590 /*
1591  * Try to shrink memory usage under the specified resource controller.
1592  * This is typically used to reclaim shmem pages, reducing the side
1593  * effects of shmem allocations on whichever mem_cgroup they are charged to.
1594  */
1595 int mem_cgroup_shrink_usage(struct page *page,
1596                             struct mm_struct *mm,
1597                             gfp_t gfp_mask)
1598 {
1599         struct mem_cgroup *mem = NULL;
1600         int progress = 0;
1601         int retry = MEM_CGROUP_RECLAIM_RETRIES;
1602
1603         if (mem_cgroup_disabled())
1604                 return 0;
1605         if (page)
1606                 mem = try_get_mem_cgroup_from_swapcache(page);
1607         if (!mem && mm)
1608                 mem = try_get_mem_cgroup_from_mm(mm);
1609         if (unlikely(!mem))
1610                 return 0;
1611
1612         do {
1613                 progress = mem_cgroup_hierarchical_reclaim(mem,
1614                                         gfp_mask, true, false);
1615                 progress += mem_cgroup_check_under_limit(mem);
1616         } while (!progress && --retry);
1617
1618         css_put(&mem->css);
1619         if (!retry)
1620                 return -ENOMEM;
1621         return 0;
1622 }
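/*
 * A note on the retry policy above: a pass counts as progress when
 * reclaim freed something or the group dropped back under its limit;
 * only after MEM_CGROUP_RECLAIM_RETRIES consecutive no-progress passes
 * does the caller (typically shmem) see -ENOMEM.
 */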
1623
1624 static DEFINE_MUTEX(set_limit_mutex);
1625
1626 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1627                                 unsigned long long val)
1628 {
1629         int retry_count;
1630         int progress;
1631         u64 memswlimit;
1632         int ret = 0;
1633         int children = mem_cgroup_count_children(memcg);
1634         u64 curusage, oldusage;
1635
1636         /*
1637          * To keep hierarchical_reclaim simple, how long we should retry
1638          * is up to the caller. We set our retry count to be a function
1639          * of the number of children we must visit in this loop.
1640          */
1641         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
1642
1643         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1644
1645         while (retry_count) {
1646                 if (signal_pending(current)) {
1647                         ret = -EINTR;
1648                         break;
1649                 }
1650                 /*
1651                  * Rather than hiding all of this in some function, do it
1652                  * open-coded so it is clear what really happens: we must
1653                  * guarantee mem->res.limit <= mem->memsw.limit.
1654                  */
1655                 mutex_lock(&set_limit_mutex);
1656                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1657                 if (memswlimit < val) {
1658                         ret = -EINVAL;
1659                         mutex_unlock(&set_limit_mutex);
1660                         break;
1661                 }
1662                 ret = res_counter_set_limit(&memcg->res, val);
1663                 mutex_unlock(&set_limit_mutex);
1664
1665                 if (!ret)
1666                         break;
1667
1668                 progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
1669                                                    false, true);
1670                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1671                 /* Was usage reduced? */
1672                 if (curusage >= oldusage)
1673                         retry_count--;
1674                 else
1675                         oldusage = curusage;
1676         }
1677
1678         return ret;
1679 }
1680
1681 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1682                                 unsigned long long val)
1683 {
1684         int retry_count;
1685         u64 memlimit, oldusage, curusage;
1686         int children = mem_cgroup_count_children(memcg);
1687         int ret = -EBUSY;
1688
1689         if (!do_swap_account)
1690                 return -EINVAL;
1691         /* see mem_cgroup_resize_limit() */
1692         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
1693         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1694         while (retry_count) {
1695                 if (signal_pending(current)) {
1696                         ret = -EINTR;
1697                         break;
1698                 }
1699                 /*
1700                  * Rather than hiding all of this in some function, do it
1701                  * open-coded so it is clear what really happens: we must
1702                  * guarantee mem->res.limit <= mem->memsw.limit.
1703                  */
1704                 mutex_lock(&set_limit_mutex);
1705                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1706                 if (memlimit > val) {
1707                         ret = -EINVAL;
1708                         mutex_unlock(&set_limit_mutex);
1709                         break;
1710                 }
1711                 ret = res_counter_set_limit(&memcg->memsw, val);
1712                 mutex_unlock(&set_limit_mutex);
1713
1714                 if (!ret)
1715                         break;
1716
1717                 mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
1718                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1719                 /* Was usage reduced? */
1720                 if (curusage >= oldusage)
1721                         retry_count--;
1722                 else
1723                         oldusage = curusage;
1724         }
1725         return ret;
1726 }
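/*
 * Taken together, the two resize helpers enforce
 * mem->res.limit <= mem->memsw.limit from both sides. An illustrative
 * shell session (the mount point is hypothetical):
 *
 *	# echo 512M > /cgroups/A/memory.memsw.limit_in_bytes
 *	# echo 256M > /cgroups/A/memory.limit_in_bytes	(ok, 256M <= 512M)
 *	# echo 1G > /cgroups/A/memory.limit_in_bytes	(-EINVAL, above memsw)
 */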
1727
1728 /*
1729  * This routine traverses the page_cgroups on the given list and drops them
1730  * all. It doesn't reclaim the pages themselves, just removes the page_cgroups.
1731  */
1732 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1733                                 int node, int zid, enum lru_list lru)
1734 {
1735         struct zone *zone;
1736         struct mem_cgroup_per_zone *mz;
1737         struct page_cgroup *pc, *busy;
1738         unsigned long flags, loop;
1739         struct list_head *list;
1740         int ret = 0;
1741
1742         zone = &NODE_DATA(node)->node_zones[zid];
1743         mz = mem_cgroup_zoneinfo(mem, node, zid);
1744         list = &mz->lists[lru];
1745
1746         loop = MEM_CGROUP_ZSTAT(mz, lru);
1747         /* give some margin against -EBUSY etc. */
1748         loop += 256;
1749         busy = NULL;
1750         while (loop--) {
1751                 ret = 0;
1752                 spin_lock_irqsave(&zone->lru_lock, flags);
1753                 if (list_empty(list)) {
1754                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1755                         break;
1756                 }
1757                 pc = list_entry(list->prev, struct page_cgroup, lru);
1758                 if (busy == pc) {
1759                         list_move(&pc->lru, list);
1760                         busy = NULL;
1761                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1762                         continue;
1763                 }
1764                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1765
1766                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1767                 if (ret == -ENOMEM)
1768                         break;
1769
1770                 if (ret == -EBUSY || ret == -EINVAL) {
1771                         /* found lock contention or "pc" is obsolete. */
1772                         busy = pc;
1773                         cond_resched();
1774                 } else
1775                         busy = NULL;
1776         }
1777
1778         if (!ret && !list_empty(list))
1779                 return -EBUSY;
1780         return ret;
1781 }
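/*
 * A note on the rotation trick above: on -EBUSY/-EINVAL the offending
 * page_cgroup is remembered in 'busy', and if the next pass pulls the
 * same entry off the tail it is moved to the head of the list, so the
 * loop rotates past it rather than spinning on one uncooperative entry.
 */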
1782
1783 /*
1784  * Make the mem_cgroup's charge 0 if there are no tasks in it.
1785  * This makes the mem_cgroup deletable.
1786  */
1787 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1788 {
1789         int ret;
1790         int node, zid, shrink;
1791         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1792         struct cgroup *cgrp = mem->css.cgroup;
1793
1794         css_get(&mem->css);
1795
1796         shrink = 0;
1797         /* should we free everything? */
1798         if (free_all)
1799                 goto try_to_free;
1800 move_account:
1801         while (mem->res.usage > 0) {
1802                 ret = -EBUSY;
1803                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1804                         goto out;
1805                 ret = -EINTR;
1806                 if (signal_pending(current))
1807                         goto out;
1808                 /* This is for putting all *used* pages on the LRU. */
1809                 lru_add_drain_all();
1810                 ret = 0;
1811                 for_each_node_state(node, N_HIGH_MEMORY) {
1812                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1813                                 enum lru_list l;
1814                                 for_each_lru(l) {
1815                                         ret = mem_cgroup_force_empty_list(mem,
1816                                                         node, zid, l);
1817                                         if (ret)
1818                                                 break;
1819                                 }
1820                         }
1821                         if (ret)
1822                                 break;
1823                 }
1824                 /* it seems the parent cgroup doesn't have enough memory */
1825                 if (ret == -ENOMEM)
1826                         goto try_to_free;
1827                 cond_resched();
1828         }
1829         ret = 0;
1830 out:
1831         css_put(&mem->css);
1832         return ret;
1833
1834 try_to_free:
1835         /* return -EBUSY if there is a task or if we come here twice. */
1836         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1837                 ret = -EBUSY;
1838                 goto out;
1839         }
1840         /* we call try-to-free pages to make this cgroup empty */
1841         lru_add_drain_all();
1842         /* try to free all pages in this cgroup */
1843         shrink = 1;
1844         while (nr_retries && mem->res.usage > 0) {
1845                 int progress;
1846
1847                 if (signal_pending(current)) {
1848                         ret = -EINTR;
1849                         goto out;
1850                 }
1851                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
1852                                                 false, get_swappiness(mem));
1853                 if (!progress) {
1854                         nr_retries--;
1855                         /* maybe some writeback is necessary */
1856                         congestion_wait(WRITE, HZ/10);
1857                 }
1858
1859         }
1860         lru_add_drain();
1861         /* try move_account...there may be some *locked* pages. */
1862         if (mem->res.usage)
1863                 goto move_account;
1864         ret = 0;
1865         goto out;
1866 }
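/*
 * Illustrative use of the entry points above (the mount point is
 * hypothetical): writing to memory.force_empty invokes
 * mem_cgroup_force_empty(mem, true), which also tries to free pages,
 * while rmdir reaches it via ->pre_destroy with free_all == false:
 *
 *	# echo 0 > /cgroups/A/memory.force_empty
 *	# rmdir /cgroups/A
 */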
1867
1868 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1869 {
1870         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1871 }
1872
1873
1874 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1875 {
1876         return mem_cgroup_from_cont(cont)->use_hierarchy;
1877 }
1878
1879 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1880                                         u64 val)
1881 {
1882         int retval = 0;
1883         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1884         struct cgroup *parent = cont->parent;
1885         struct mem_cgroup *parent_mem = NULL;
1886
1887         if (parent)
1888                 parent_mem = mem_cgroup_from_cont(parent);
1889
1890         cgroup_lock();
1891         /*
1892          * If the parent's use_hierarchy is set, we can't make any
1893          * modifications in the child subtrees. If it is unset, then the
1894          * change can occur, provided the current cgroup has no children.
1895          *
1896          * For the root cgroup, parent_mem is NULL; we allow the value to
1897          * be set if there are no children.
1898          */
1899         if ((!parent_mem || !parent_mem->use_hierarchy) &&
1900                                 (val == 1 || val == 0)) {
1901                 if (list_empty(&cont->children))
1902                         mem->use_hierarchy = val;
1903                 else
1904                         retval = -EBUSY;
1905         } else
1906                 retval = -EINVAL;
1907         cgroup_unlock();
1908
1909         return retval;
1910 }
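/*
 * An illustrative consequence of the checks above (hypothetical layout):
 *
 *	# mkdir /cgroups/A
 *	# echo 1 > /cgroups/A/memory.use_hierarchy
 *	# mkdir /cgroups/A/B
 *	# echo 0 > /cgroups/A/B/memory.use_hierarchy	(-EINVAL: parent set)
 *	# echo 0 > /cgroups/A/memory.use_hierarchy	(-EBUSY: has children)
 */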
1911
1912 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1913 {
1914         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1915         u64 val = 0;
1916         int type, name;
1917
1918         type = MEMFILE_TYPE(cft->private);
1919         name = MEMFILE_ATTR(cft->private);
1920         switch (type) {
1921         case _MEM:
1922                 val = res_counter_read_u64(&mem->res, name);
1923                 break;
1924         case _MEMSWAP:
1925                 if (do_swap_account)
1926                         val = res_counter_read_u64(&mem->memsw, name);
1927                 break;
1928         default:
1929                 BUG();
1930                 break;
1931         }
1932         return val;
1933 }
1934 /*
1935  * The only user of this function is the write handler for
1936  * RES_LIMIT.
1937  */
1938 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1939                             const char *buffer)
1940 {
1941         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1942         int type, name;
1943         unsigned long long val;
1944         int ret;
1945
1946         type = MEMFILE_TYPE(cft->private);
1947         name = MEMFILE_ATTR(cft->private);
1948         switch (name) {
1949         case RES_LIMIT:
1950                 /* This helper does all the necessary parsing; reuse it */
1951                 ret = res_counter_memparse_write_strategy(buffer, &val);
1952                 if (ret)
1953                         break;
1954                 if (type == _MEM)
1955                         ret = mem_cgroup_resize_limit(memcg, val);
1956                 else
1957                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
1958                 break;
1959         default:
1960                 ret = -EINVAL; /* should this be a BUG()? */
1961                 break;
1962         }
1963         return ret;
1964 }
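/*
 * Illustrative writes accepted by the handler above; memparse-style
 * suffixes (K, M, G) are handled by res_counter_memparse_write_strategy():
 *
 *	# echo 64M > memory.limit_in_bytes
 *	# echo 134217728 > memory.memsw.limit_in_bytes
 */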
1965
1966 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
1967                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
1968 {
1969         struct cgroup *cgroup;
1970         unsigned long long min_limit, min_memsw_limit, tmp;
1971
1972         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1973         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1974         cgroup = memcg->css.cgroup;
1975         if (!memcg->use_hierarchy)
1976                 goto out;
1977
1978         while (cgroup->parent) {
1979                 cgroup = cgroup->parent;
1980                 memcg = mem_cgroup_from_cont(cgroup);
1981                 if (!memcg->use_hierarchy)
1982                         break;
1983                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
1984                 min_limit = min(min_limit, tmp);
1985                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1986                 min_memsw_limit = min(min_memsw_limit, tmp);
1987         }
1988 out:
1989         *mem_limit = min_limit;
1990         *memsw_limit = min_memsw_limit;
1991         return;
1992 }
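/*
 * A worked example for the walk above (hypothetical tree, hierarchy
 * enabled everywhere): root (unlimited) -> A (limit 1G) -> B (limit 2G).
 * For B the reported hierarchical_memory_limit is min(2G, 1G) = 1G, and
 * the memsw limit is minimized over the same walk in parallel.
 */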
1993
1994 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
1995 {
1996         struct mem_cgroup *mem;
1997         int type, name;
1998
1999         mem = mem_cgroup_from_cont(cont);
2000         type = MEMFILE_TYPE(event);
2001         name = MEMFILE_ATTR(event);
2002         switch (name) {
2003         case RES_MAX_USAGE:
2004                 if (type == _MEM)
2005                         res_counter_reset_max(&mem->res);
2006                 else
2007                         res_counter_reset_max(&mem->memsw);
2008                 break;
2009         case RES_FAILCNT:
2010                 if (type == _MEM)
2011                         res_counter_reset_failcnt(&mem->res);
2012                 else
2013                         res_counter_reset_failcnt(&mem->memsw);
2014                 break;
2015         }
2016         return 0;
2017 }
2018
2019
2020 /* For reading statistics */
2021 enum {
2022         MCS_CACHE,
2023         MCS_RSS,
2024         MCS_PGPGIN,
2025         MCS_PGPGOUT,
2026         MCS_INACTIVE_ANON,
2027         MCS_ACTIVE_ANON,
2028         MCS_INACTIVE_FILE,
2029         MCS_ACTIVE_FILE,
2030         MCS_UNEVICTABLE,
2031         NR_MCS_STAT,
2032 };
2033
2034 struct mcs_total_stat {
2035         s64 stat[NR_MCS_STAT];
2036 };
2037
2038 static struct {
2039         char *local_name;
2040         char *total_name;
2041 } memcg_stat_strings[NR_MCS_STAT] = {
2042         {"cache", "total_cache"},
2043         {"rss", "total_rss"},
2044         {"pgpgin", "total_pgpgin"},
2045         {"pgpgout", "total_pgpgout"},
2046         {"inactive_anon", "total_inactive_anon"},
2047         {"active_anon", "total_active_anon"},
2048         {"inactive_file", "total_inactive_file"},
2049         {"active_file", "total_active_file"},
2050         {"unevictable", "total_unevictable"}
2051 };
2052
2053
2054 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
2055 {
2056         struct mcs_total_stat *s = data;
2057         s64 val;
2058
2059         /* per cpu stat */
2060         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
2061         s->stat[MCS_CACHE] += val * PAGE_SIZE;
2062         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
2063         s->stat[MCS_RSS] += val * PAGE_SIZE;
2064         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
2065         s->stat[MCS_PGPGIN] += val;
2066         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
2067         s->stat[MCS_PGPGOUT] += val;
2068
2069         /* per zone stat */
2070         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
2071         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
2072         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
2073         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
2074         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
2075         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
2076         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
2077         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
2078         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
2079         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
2080         return 0;
2081 }
2082
2083 static void
2084 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
2085 {
2086         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
2087 }
2088
2089 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
2090                                  struct cgroup_map_cb *cb)
2091 {
2092         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
2093         struct mcs_total_stat mystat;
2094         int i;
2095
2096         memset(&mystat, 0, sizeof(mystat));
2097         mem_cgroup_get_local_stat(mem_cont, &mystat);
2098
2099         for (i = 0; i < NR_MCS_STAT; i++)
2100                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
2101
2102         /* Hierarchical information */
2103         {
2104                 unsigned long long limit, memsw_limit;
2105                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
2106                 cb->fill(cb, "hierarchical_memory_limit", limit);
2107                 if (do_swap_account)
2108                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
2109         }
2110
2111         memset(&mystat, 0, sizeof(mystat));
2112         mem_cgroup_get_total_stat(mem_cont, &mystat);
2113         for (i = 0; i < NR_MCS_STAT; i++)
2114                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
2115
2116
2117 #ifdef CONFIG_DEBUG_VM
2118         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
2119
2120         {
2121                 int nid, zid;
2122                 struct mem_cgroup_per_zone *mz;
2123                 unsigned long recent_rotated[2] = {0, 0};
2124                 unsigned long recent_scanned[2] = {0, 0};
2125
2126                 for_each_online_node(nid)
2127                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2128                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
2129
2130                                 recent_rotated[0] +=
2131                                         mz->reclaim_stat.recent_rotated[0];
2132                                 recent_rotated[1] +=
2133                                         mz->reclaim_stat.recent_rotated[1];
2134                                 recent_scanned[0] +=
2135                                         mz->reclaim_stat.recent_scanned[0];
2136                                 recent_scanned[1] +=
2137                                         mz->reclaim_stat.recent_scanned[1];
2138                         }
2139                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
2140                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
2141                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
2142                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
2143         }
2144 #endif
2145
2146         return 0;
2147 }
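/*
 * Sample memory.stat output assembled above (the values are made up;
 * total_* covers the whole sub-hierarchy when use_hierarchy is set):
 *
 *	cache 8192000
 *	rss 4096000
 *	pgpgin 3000
 *	pgpgout 2000
 *	...
 *	hierarchical_memory_limit 268435456
 *	total_cache 16384000
 *	...
 */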
2148
2149 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
2150 {
2151         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2152
2153         return get_swappiness(memcg);
2154 }
2155
2156 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
2157                                        u64 val)
2158 {
2159         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2160         struct mem_cgroup *parent;
2161
2162         if (val > 100)
2163                 return -EINVAL;
2164
2165         if (cgrp->parent == NULL)
2166                 return -EINVAL;
2167
2168         parent = mem_cgroup_from_cont(cgrp->parent);
2169
2170         cgroup_lock();
2171
2172         /* Under hierarchy, only a hierarchy root with no children may set this */
2173         if ((parent->use_hierarchy) ||
2174             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
2175                 cgroup_unlock();
2176                 return -EINVAL;
2177         }
2178
2179         spin_lock(&memcg->reclaim_param_lock);
2180         memcg->swappiness = val;
2181         spin_unlock(&memcg->reclaim_param_lock);
2182
2183         cgroup_unlock();
2184
2185         return 0;
2186 }
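/*
 * Illustrative use of the knob above: values 0..100 mirror the global
 * /proc/sys/vm/swappiness, and writes are refused on the root cgroup,
 * under a hierarchical parent, or once the group itself heads a
 * populated hierarchy:
 *
 *	# echo 10 > /cgroups/A/memory.swappiness
 */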
2187
2188
2189 static struct cftype mem_cgroup_files[] = {
2190         {
2191                 .name = "usage_in_bytes",
2192                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2193                 .read_u64 = mem_cgroup_read,
2194         },
2195         {
2196                 .name = "max_usage_in_bytes",
2197                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
2198                 .trigger = mem_cgroup_reset,
2199                 .read_u64 = mem_cgroup_read,
2200         },
2201         {
2202                 .name = "limit_in_bytes",
2203                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
2204                 .write_string = mem_cgroup_write,
2205                 .read_u64 = mem_cgroup_read,
2206         },
2207         {
2208                 .name = "failcnt",
2209                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
2210                 .trigger = mem_cgroup_reset,
2211                 .read_u64 = mem_cgroup_read,
2212         },
2213         {
2214                 .name = "stat",
2215                 .read_map = mem_control_stat_show,
2216         },
2217         {
2218                 .name = "force_empty",
2219                 .trigger = mem_cgroup_force_empty_write,
2220         },
2221         {
2222                 .name = "use_hierarchy",
2223                 .write_u64 = mem_cgroup_hierarchy_write,
2224                 .read_u64 = mem_cgroup_hierarchy_read,
2225         },
2226         {
2227                 .name = "swappiness",
2228                 .read_u64 = mem_cgroup_swappiness_read,
2229                 .write_u64 = mem_cgroup_swappiness_write,
2230         },
2231 };
2232
2233 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2234 static struct cftype memsw_cgroup_files[] = {
2235         {
2236                 .name = "memsw.usage_in_bytes",
2237                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2238                 .read_u64 = mem_cgroup_read,
2239         },
2240         {
2241                 .name = "memsw.max_usage_in_bytes",
2242                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2243                 .trigger = mem_cgroup_reset,
2244                 .read_u64 = mem_cgroup_read,
2245         },
2246         {
2247                 .name = "memsw.limit_in_bytes",
2248                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2249                 .write_string = mem_cgroup_write,
2250                 .read_u64 = mem_cgroup_read,
2251         },
2252         {
2253                 .name = "memsw.failcnt",
2254                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2255                 .trigger = mem_cgroup_reset,
2256                 .read_u64 = mem_cgroup_read,
2257         },
2258 };
2259
2260 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2261 {
2262         if (!do_swap_account)
2263                 return 0;
2264         return cgroup_add_files(cont, ss, memsw_cgroup_files,
2265                                 ARRAY_SIZE(memsw_cgroup_files));
2266 };
2267 #else
2268 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2269 {
2270         return 0;
2271 }
2272 #endif
2273
2274 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2275 {
2276         struct mem_cgroup_per_node *pn;
2277         struct mem_cgroup_per_zone *mz;
2278         enum lru_list l;
2279         int zone, tmp = node;
2280         /*
2281          * This routine is called for each possible node, but it is a BUG
2282          * to call kmalloc() against an offline node.
2283          *
2284          * TODO: this routine can waste a lot of memory on nodes that will
2285          *       never be onlined. It would be better to use a memory
2286          *       hotplug callback instead.
2287          */
2288         if (!node_state(node, N_NORMAL_MEMORY))
2289                 tmp = -1;
2290         pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
2291         if (!pn)
2292                 return 1;
2293 
2294         mem->info.nodeinfo[node] = pn;
2296
2297         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2298                 mz = &pn->zoneinfo[zone];
2299                 for_each_lru(l)
2300                         INIT_LIST_HEAD(&mz->lists[l]);
2301         }
2302         return 0;
2303 }
2304
2305 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2306 {
2307         kfree(mem->info.nodeinfo[node]);
2308 }
2309
2310 static int mem_cgroup_size(void)
2311 {
2312         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2313         return sizeof(struct mem_cgroup) + cpustat_size;
2314 }
2315
2316 static struct mem_cgroup *mem_cgroup_alloc(void)
2317 {
2318         struct mem_cgroup *mem;
2319         int size = mem_cgroup_size();
2320
2321         if (size < PAGE_SIZE)
2322                 mem = kmalloc(size, GFP_KERNEL);
2323         else
2324                 mem = vmalloc(size);
2325
2326         if (mem)
2327                 memset(mem, 0, size);
2328         return mem;
2329 }
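/*
 * Sizing note for the allocator above: the flexible per-cpu stat array
 * makes struct mem_cgroup grow with nr_cpu_ids, so small configurations
 * are served by the slab while large ones fall back to vmalloc().
 */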
2330
2331 /*
2332  * When a mem_cgroup is destroyed, references from swap_cgroup can
2333  * remain (scanning them all at force_empty would be too costly...).
2334  *
2335  * Instead of clearing all references at force_empty, we remember
2336  * the number of references from swap_cgroup and free the mem_cgroup
2337  * when it drops to 0.
2338  *
2339  * Removal of the cgroup itself succeeds regardless of refs from swap.
2340  */
2341
2342 static void __mem_cgroup_free(struct mem_cgroup *mem)
2343 {
2344         int node;
2345
2346         free_css_id(&mem_cgroup_subsys, &mem->css);
2347
2348         for_each_node_state(node, N_POSSIBLE)
2349                 free_mem_cgroup_per_zone_info(mem, node);
2350
2351         if (mem_cgroup_size() < PAGE_SIZE)
2352                 kfree(mem);
2353         else
2354                 vfree(mem);
2355 }
2356
2357 static void mem_cgroup_get(struct mem_cgroup *mem)
2358 {
2359         atomic_inc(&mem->refcnt);
2360 }
2361
2362 static void mem_cgroup_put(struct mem_cgroup *mem)
2363 {
2364         if (atomic_dec_and_test(&mem->refcnt)) {
2365                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
2366                 __mem_cgroup_free(mem);
2367                 if (parent)
2368                         mem_cgroup_put(parent);
2369         }
2370 }
2371
2372 /*
2373  * Returns the parent mem_cgroup in the memcg hierarchy, if hierarchy is enabled.
2374  */
2375 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
2376 {
2377         if (!mem->res.parent)
2378                 return NULL;
2379         return mem_cgroup_from_res_counter(mem->res.parent, res);
2380 }
2381
2382 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2383 static void __init enable_swap_cgroup(void)
2384 {
2385         if (!mem_cgroup_disabled() && really_do_swap_account)
2386                 do_swap_account = 1;
2387 }
2388 #else
2389 static void __init enable_swap_cgroup(void)
2390 {
2391 }
2392 #endif
2393
2394 static struct cgroup_subsys_state * __ref
2395 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2396 {
2397         struct mem_cgroup *mem, *parent;
2398         long error = -ENOMEM;
2399         int node;
2400
2401         mem = mem_cgroup_alloc();
2402         if (!mem)
2403                 return ERR_PTR(error);
2404
2405         for_each_node_state(node, N_POSSIBLE)
2406                 if (alloc_mem_cgroup_per_zone_info(mem, node))
2407                         goto free_out;
2408         /* root ? */
2409         if (cont->parent == NULL) {
2410                 enable_swap_cgroup();
2411                 parent = NULL;
2412         } else {
2413                 parent = mem_cgroup_from_cont(cont->parent);
2414                 mem->use_hierarchy = parent->use_hierarchy;
2415         }
2416
2417         if (parent && parent->use_hierarchy) {
2418                 res_counter_init(&mem->res, &parent->res);
2419                 res_counter_init(&mem->memsw, &parent->memsw);
2420                 /*
2421                  * We increment refcnt of the parent to ensure that we can
2422                  * safely access it on res_counter_charge/uncharge.
2423                  * This refcnt will be decremented when freeing this
2424                  * mem_cgroup(see mem_cgroup_put).
2425                  */
2426                 mem_cgroup_get(parent);
2427         } else {
2428                 res_counter_init(&mem->res, NULL);
2429                 res_counter_init(&mem->memsw, NULL);
2430         }
2431         mem->last_scanned_child = 0;
2432         spin_lock_init(&mem->reclaim_param_lock);
2433
2434         if (parent)
2435                 mem->swappiness = get_swappiness(parent);
2436         atomic_set(&mem->refcnt, 1);
2437         return &mem->css;
2438 free_out:
2439         __mem_cgroup_free(mem);
2440         return ERR_PTR(error);
2441 }
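/*
 * Lifetime note: the initial refcnt of 1 set above is dropped by
 * mem_cgroup_put() in mem_cgroup_destroy(); references held by
 * swap_cgroup records can keep the structure alive past rmdir, as
 * described before __mem_cgroup_free().
 */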
2442
2443 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2444                                         struct cgroup *cont)
2445 {
2446         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2447
2448         return mem_cgroup_force_empty(mem, false);
2449 }
2450
2451 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2452                                 struct cgroup *cont)
2453 {
2454         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2455
2456         mem_cgroup_put(mem);
2457 }
2458
2459 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2460                                 struct cgroup *cont)
2461 {
2462         int ret;
2463
2464         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2465                                 ARRAY_SIZE(mem_cgroup_files));
2466
2467         if (!ret)
2468                 ret = register_memsw_files(cont, ss);
2469         return ret;
2470 }
2471
2472 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2473                                 struct cgroup *cont,
2474                                 struct cgroup *old_cont,
2475                                 struct task_struct *p)
2476 {
2477         mutex_lock(&memcg_tasklist);
2478         /*
2479          * FIXME: It would be better to move the charges of this process
2480          * from the old memcg to the new one, but that is still on the TODO list.
2481          */
2482         mutex_unlock(&memcg_tasklist);
2483 }
2484
2485 struct cgroup_subsys mem_cgroup_subsys = {
2486         .name = "memory",
2487         .subsys_id = mem_cgroup_subsys_id,
2488         .create = mem_cgroup_create,
2489         .pre_destroy = mem_cgroup_pre_destroy,
2490         .destroy = mem_cgroup_destroy,
2491         .populate = mem_cgroup_populate,
2492         .attach = mem_cgroup_move_task,
2493         .early_init = 0,
2494         .use_id = 1,
2495 };
2496
2497 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2498
2499 static int __init disable_swap_account(char *s)
2500 {
2501         really_do_swap_account = 0;
2502         return 1;
2503 }
2504 __setup("noswapaccount", disable_swap_account);
2505 #endif
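/*
 * Boot-time usage of the hook above (illustrative): passing
 * "noswapaccount" on the kernel command line clears
 * really_do_swap_account before enable_swap_cgroup() runs, so
 * do_swap_account stays 0 and the memsw.* control files are never
 * registered.
 */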