memcg: add comments explaining memory barriers
mm/memcontrol.c
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/limits.h>
31 #include <linux/mutex.h>
32 #include <linux/slab.h>
33 #include <linux/swap.h>
34 #include <linux/spinlock.h>
35 #include <linux/fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/vmalloc.h>
38 #include <linux/mm_inline.h>
39 #include <linux/page_cgroup.h>
40 #include "internal.h"
41
42 #include <asm/uaccess.h>
43
44 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
45 #define MEM_CGROUP_RECLAIM_RETRIES      5
46 struct mem_cgroup *root_mem_cgroup __read_mostly;
47
48 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
49 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
50 int do_swap_account __read_mostly;
51 static int really_do_swap_account __initdata = 1; /* for remembering boot option */
52 #else
53 #define do_swap_account         (0)
54 #endif
55
56 static DEFINE_MUTEX(memcg_tasklist);    /* can be held under cgroup_mutex */
57
58 /*
59  * Statistics for memory cgroup.
60  */
61 enum mem_cgroup_stat_index {
62         /*
63          * For the memory controller, usage = pagecache + rss.
64          */
65         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
66         MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
67         MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
68         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
69         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
70
71         MEM_CGROUP_STAT_NSTATS,
72 };
73
74 struct mem_cgroup_stat_cpu {
75         s64 count[MEM_CGROUP_STAT_NSTATS];
76 } ____cacheline_aligned_in_smp;
77
78 struct mem_cgroup_stat {
79         struct mem_cgroup_stat_cpu cpustat[0];
80 };
81
82 /*
83  * For accounting under irq disable, there is no need to increment the preempt count.
84  */
85 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
86                 enum mem_cgroup_stat_index idx, int val)
87 {
88         stat->count[idx] += val;
89 }
90
91 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
92                 enum mem_cgroup_stat_index idx)
93 {
94         int cpu;
95         s64 ret = 0;
96         for_each_possible_cpu(cpu)
97                 ret += stat->cpustat[cpu].count[idx];
98         return ret;
99 }
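/*
 * Added note (a sketch, not part of the original file): writers update only
 * their own CPU's cacheline-aligned slot, locklessly; readers sum over all
 * possible CPUs. A charge on CPU0 later uncharged on CPU1 leaves one slot
 * positive and one negative, but the sum stays correct.
 */
#if 0	/* example only */
	s64 cache_pages = mem_cgroup_read_stat(&mem->stat,
					       MEM_CGROUP_STAT_CACHE);
#endif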
100
101 static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
102 {
103         s64 ret;
104
105         ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
106         ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
107         return ret;
108 }
109
110 /*
111  * per-zone information in memory controller.
112  */
113 struct mem_cgroup_per_zone {
114         /*
115          * the per-cgroup LRU lists below, protected by zone->lru_lock
116          */
117         struct list_head        lists[NR_LRU_LISTS];
118         unsigned long           count[NR_LRU_LISTS];
119
120         struct zone_reclaim_stat reclaim_stat;
121 };
122 /* Macro for accessing counter */
123 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
124
125 struct mem_cgroup_per_node {
126         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
127 };
128
129 struct mem_cgroup_lru_info {
130         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
131 };
132
133 /*
134  * The memory controller data structure. The memory controller controls both
135  * page cache and RSS per cgroup. We would eventually like to provide
136  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
137  * to help the administrator determine what knobs to tune.
138  *
139  * TODO: Add a water mark for the memory controller. Reclaim will begin when
140  * we hit the water mark. Maybe even add a low water mark, such that
141  * no reclaim occurs from a cgroup at its low water mark; this is
142  * a feature that will be implemented much later in the future.
143  */
144 struct mem_cgroup {
145         struct cgroup_subsys_state css;
146         /*
147          * the counter to account for memory usage
148          */
149         struct res_counter res;
150         /*
151          * the counter to account for mem+swap usage.
152          */
153         struct res_counter memsw;
154         /*
155          * Per cgroup active and inactive list, similar to the
156          * per zone LRU lists.
157          */
158         struct mem_cgroup_lru_info info;
159
160         /*
161          * protects reclaim-related members, e.g. prev_priority, swappiness.
162          */
163         spinlock_t reclaim_param_lock;
164
165         int     prev_priority;  /* for recording reclaim priority */
166
167         /*
168  * While reclaiming in a hierarchy, we cache the last child we
169          * reclaimed from.
170          */
171         int last_scanned_child;
172         /*
173          * Should the accounting and control be hierarchical, per subtree?
174          */
175         bool use_hierarchy;
176         unsigned long   last_oom_jiffies;
177         atomic_t        refcnt;
178
179         unsigned int    swappiness;
180
181         /* set when res.limit == memsw.limit */
182         bool            memsw_is_minimum;
183
184         /*
185          * statistics. This must be placed at the end of memcg.
186          */
187         struct mem_cgroup_stat stat;
188 };
189
190 enum charge_type {
191         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
192         MEM_CGROUP_CHARGE_TYPE_MAPPED,
193         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
194         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
195         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
196         MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
197         NR_CHARGE_TYPE,
198 };
199
200 /* used only in this file (for easy reading) */
201 #define PCGF_CACHE      (1UL << PCG_CACHE)
202 #define PCGF_USED       (1UL << PCG_USED)
203 #define PCGF_LOCK       (1UL << PCG_LOCK)
204 /* Not used, but added here for completeness */
205 #define PCGF_ACCT       (1UL << PCG_ACCT)
206
207 /* for encoding cft->private value on file */
208 #define _MEM                    (0)
209 #define _MEMSWAP                (1)
210 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
211 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
212 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
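/*
 * Illustrative sketch (not part of the original file): how cft->private
 * round-trips through the MEMFILE_* macros. RES_LIMIT is the attribute
 * constant from <linux/res_counter.h>.
 */
#if 0	/* example only */
	int priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);

	/* the type lands in bits 16-31, the attribute in bits 0-15 */
	BUG_ON(MEMFILE_TYPE(priv) != _MEMSWAP);
	BUG_ON(MEMFILE_ATTR(priv) != RES_LIMIT);
#endif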
213
214 static void mem_cgroup_get(struct mem_cgroup *mem);
215 static void mem_cgroup_put(struct mem_cgroup *mem);
216 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
217
218 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
219                                          struct page_cgroup *pc,
220                                          bool charge)
221 {
222         int val = (charge)? 1 : -1;
223         struct mem_cgroup_stat *stat = &mem->stat;
224         struct mem_cgroup_stat_cpu *cpustat;
225         int cpu = get_cpu();
226
227         cpustat = &stat->cpustat[cpu];
228         if (PageCgroupCache(pc))
229                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
230         else
231                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
232
233         if (charge)
234                 __mem_cgroup_stat_add_safe(cpustat,
235                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
236         else
237                 __mem_cgroup_stat_add_safe(cpustat,
238                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
239         put_cpu();
240 }
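/*
 * Added note: get_cpu() above disables preemption, so the non-atomic
 * __mem_cgroup_stat_add_safe() updates cannot be interleaved with another
 * task on the same CPU; other CPUs never write this CPU's cpustat slot.
 */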
241
242 static struct mem_cgroup_per_zone *
243 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
244 {
245         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
246 }
247
248 static struct mem_cgroup_per_zone *
249 page_cgroup_zoneinfo(struct page_cgroup *pc)
250 {
251         struct mem_cgroup *mem = pc->mem_cgroup;
252         int nid = page_cgroup_nid(pc);
253         int zid = page_cgroup_zid(pc);
254
255         if (!mem)
256                 return NULL;
257
258         return mem_cgroup_zoneinfo(mem, nid, zid);
259 }
260
261 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
262                                         enum lru_list idx)
263 {
264         int nid, zid;
265         struct mem_cgroup_per_zone *mz;
266         u64 total = 0;
267
268         for_each_online_node(nid)
269                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
270                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
271                         total += MEM_CGROUP_ZSTAT(mz, idx);
272                 }
273         return total;
274 }
275
276 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
277 {
278         return container_of(cgroup_subsys_state(cont,
279                                 mem_cgroup_subsys_id), struct mem_cgroup,
280                                 css);
281 }
282
283 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
284 {
285         /*
286          * mm_update_next_owner() may clear mm->owner to NULL
287          * if it races with swapoff, page migration, etc.
288          * So this can be called with p == NULL.
289          */
290         if (unlikely(!p))
291                 return NULL;
292
293         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
294                                 struct mem_cgroup, css);
295 }
296
297 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
298 {
299         struct mem_cgroup *mem = NULL;
300
301         if (!mm)
302                 return NULL;
303         /*
304          * Because we hold no locks, mm->owner may be moved to another
305          * cgroup while we run. We use css_tryget() here, even if it looks
306          * pessimistic, rather than adding locks.
307          */
308         rcu_read_lock();
309         do {
310                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
311                 if (unlikely(!mem))
312                         break;
313         } while (!css_tryget(&mem->css));
314         rcu_read_unlock();
315         return mem;
316 }
317
318 /*
319  * Call the callback function for every cgroup in the hierarchy tree.
320  */
321 static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
322                           int (*func)(struct mem_cgroup *, void *))
323 {
324         int found, ret, nextid;
325         struct cgroup_subsys_state *css;
326         struct mem_cgroup *mem;
327
328         if (!root->use_hierarchy)
329                 return (*func)(root, data);
330
331         nextid = 1;
332         do {
333                 ret = 0;
334                 mem = NULL;
335
336                 rcu_read_lock();
337                 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
338                                    &found);
339                 if (css && css_tryget(css))
340                         mem = container_of(css, struct mem_cgroup, css);
341                 rcu_read_unlock();
342
343                 if (mem) {
344                         ret = (*func)(mem, data);
345                         css_put(&mem->css);
346                 }
347                 nextid = found + 1;
348         } while (!ret && css);
349
350         return ret;
351 }
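/*
 * Usage sketch (not part of the original file; see the real callers
 * mem_cgroup_count_children_cb() and record_last_oom_cb() below): a
 * non-zero return from the callback stops the walk early.
 */
#if 0	/* example only */
static int count_cb(struct mem_cgroup *mem, void *data)
{
	(*(int *)data)++;
	return 0;		/* 0 = keep walking */
}
	...
	int num = 0;
	mem_cgroup_walk_tree(root, &num, count_cb);
#endif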
352
353 static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
354 {
355         return (mem == root_mem_cgroup);
356 }
357
358 /*
359  * The following LRU functions may be used without holding PCG_LOCK.
360  * They are called by the global LRU routines, independently of memcg.
361  * What we have to take care of here is the validity of pc->mem_cgroup.
362  *
363  * Changes to pc->mem_cgroup happen on
364  * 1. charge
365  * 2. moving account
366  * In the typical case, "charge" is done before add-to-lru. The exception
367  * is SwapCache, which is added to the LRU before being charged.
368  * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
369  * When moving an account, the page is not on the LRU; it is isolated.
370  */
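/*
 * Added illustration (a sketch, not authoritative): the smp_wmb()/smp_rmb()
 * pairing that the comments in the functions below rely on.
 *
 *   charge side (__mem_cgroup_commit_charge)   LRU side (e.g. add_lru_list)
 *   ----------------------------------------   ----------------------------
 *   pc->mem_cgroup = mem;                      smp_rmb();
 *   smp_wmb();                                 if (!PageCgroupUsed(pc))
 *   SetPageCgroupUsed(pc);                             return;
 *                                              mz = page_cgroup_zoneinfo(pc);
 *
 * If the LRU side observes the Used bit as set, the barrier pair ensures
 * it also observes the pc->mem_cgroup store made before smp_wmb().
 */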
371
372 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
373 {
374         struct page_cgroup *pc;
375         struct mem_cgroup_per_zone *mz;
376
377         if (mem_cgroup_disabled())
378                 return;
379         pc = lookup_page_cgroup(page);
380         /* can happen while we handle swapcache. */
381         if (!TestClearPageCgroupAcctLRU(pc))
382                 return;
383         VM_BUG_ON(!pc->mem_cgroup);
384         /*
385          * We don't check PCG_USED bit. It's cleared when the "page" is finally
386          * removed from global LRU.
387          */
388         mz = page_cgroup_zoneinfo(pc);
389         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
390         if (mem_cgroup_is_root(pc->mem_cgroup))
391                 return;
392         VM_BUG_ON(list_empty(&pc->lru));
393         list_del_init(&pc->lru);
394         return;
395 }
396
397 void mem_cgroup_del_lru(struct page *page)
398 {
399         mem_cgroup_del_lru_list(page, page_lru(page));
400 }
401
402 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
403 {
404         struct mem_cgroup_per_zone *mz;
405         struct page_cgroup *pc;
406
407         if (mem_cgroup_disabled())
408                 return;
409
410         pc = lookup_page_cgroup(page);
411         /*
412          * The Used bit is set without atomic ops, but after smp_wmb().
413          * To make pc->mem_cgroup visible as well, insert smp_rmb() here.
414          */
415         smp_rmb();
416         /* unused or root page is not rotated. */
417         if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
418                 return;
419         mz = page_cgroup_zoneinfo(pc);
420         list_move(&pc->lru, &mz->lists[lru]);
421 }
422
423 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
424 {
425         struct page_cgroup *pc;
426         struct mem_cgroup_per_zone *mz;
427
428         if (mem_cgroup_disabled())
429                 return;
430         pc = lookup_page_cgroup(page);
431         VM_BUG_ON(PageCgroupAcctLRU(pc));
432         /*
433          * The Used bit is set without atomic ops, but after smp_wmb().
434          * To make pc->mem_cgroup visible as well, insert smp_rmb() here.
435          */
436         smp_rmb();
437         if (!PageCgroupUsed(pc))
438                 return;
439
440         mz = page_cgroup_zoneinfo(pc);
441         MEM_CGROUP_ZSTAT(mz, lru) += 1;
442         SetPageCgroupAcctLRU(pc);
443         if (mem_cgroup_is_root(pc->mem_cgroup))
444                 return;
445         list_add(&pc->lru, &mz->lists[lru]);
446 }
447
448 /*
449  * When handling SwapCache, pc->mem_cgroup may change while the page is linked
450  * to the LRU, because the page may be reused after it is fully uncharged
451  * (SwapCache behavior). To handle that, unlink the page_cgroup from the LRU
452  * when charging it again. This function is only used to charge SwapCache. It
453  * runs under lock_page(), and zone->lru_lock must never be held by the caller.
454  */
455 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
456 {
457         unsigned long flags;
458         struct zone *zone = page_zone(page);
459         struct page_cgroup *pc = lookup_page_cgroup(page);
460
461         spin_lock_irqsave(&zone->lru_lock, flags);
462         /*
463          * Forget old LRU when this page_cgroup is *not* used. This Used bit
464          * is guarded by lock_page() because the page is SwapCache.
465          */
466         if (!PageCgroupUsed(pc))
467                 mem_cgroup_del_lru_list(page, page_lru(page));
468         spin_unlock_irqrestore(&zone->lru_lock, flags);
469 }
470
471 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
472 {
473         unsigned long flags;
474         struct zone *zone = page_zone(page);
475         struct page_cgroup *pc = lookup_page_cgroup(page);
476
477         spin_lock_irqsave(&zone->lru_lock, flags);
478         /* link when the page is linked to LRU but page_cgroup isn't */
479         if (PageLRU(page) && !PageCgroupAcctLRU(pc))
480                 mem_cgroup_add_lru_list(page, page_lru(page));
481         spin_unlock_irqrestore(&zone->lru_lock, flags);
482 }
483
484
485 void mem_cgroup_move_lists(struct page *page,
486                            enum lru_list from, enum lru_list to)
487 {
488         if (mem_cgroup_disabled())
489                 return;
490         mem_cgroup_del_lru_list(page, from);
491         mem_cgroup_add_lru_list(page, to);
492 }
493
494 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
495 {
496         int ret;
497         struct mem_cgroup *curr = NULL;
498
499         task_lock(task);
500         rcu_read_lock();
501         curr = try_get_mem_cgroup_from_mm(task->mm);
502         rcu_read_unlock();
503         task_unlock(task);
504         if (!curr)
505                 return 0;
506         if (curr->use_hierarchy)
507                 ret = css_is_ancestor(&curr->css, &mem->css);
508         else
509                 ret = (curr == mem);
510         css_put(&curr->css);
511         return ret;
512 }
513
514 /*
515  * prev_priority control... this is used in the memory reclaim path.
516  */
517 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
518 {
519         int prev_priority;
520
521         spin_lock(&mem->reclaim_param_lock);
522         prev_priority = mem->prev_priority;
523         spin_unlock(&mem->reclaim_param_lock);
524
525         return prev_priority;
526 }
527
528 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
529 {
530         spin_lock(&mem->reclaim_param_lock);
531         if (priority < mem->prev_priority)
532                 mem->prev_priority = priority;
533         spin_unlock(&mem->reclaim_param_lock);
534 }
535
536 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
537 {
538         spin_lock(&mem->reclaim_param_lock);
539         mem->prev_priority = priority;
540         spin_unlock(&mem->reclaim_param_lock);
541 }
542
543 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
544 {
545         unsigned long active;
546         unsigned long inactive;
547         unsigned long gb;
548         unsigned long inactive_ratio;
549
550         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
551         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
552
553         gb = (inactive + active) >> (30 - PAGE_SHIFT);
554         if (gb)
555                 inactive_ratio = int_sqrt(10 * gb);
556         else
557                 inactive_ratio = 1;
558
559         if (present_pages) {
560                 present_pages[0] = inactive;
561                 present_pages[1] = active;
562         }
563
564         return inactive_ratio;
565 }
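/*
 * Added reference table (a sketch; mirrors the inactive_anon sizing used by
 * global reclaim in mm/vmscan.c): inactive_ratio = int_sqrt(10 * GB), so
 *
 *   total anon    inactive_ratio    max inactive anon
 *       1GB             3                ~250MB
 *      10GB            10                ~0.9GB
 *     100GB            31                  ~3GB
 *
 * and anything below 1GB uses a ratio of 1 (inactive may reach 50%).
 */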
566
567 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
568 {
569         unsigned long active;
570         unsigned long inactive;
571         unsigned long present_pages[2];
572         unsigned long inactive_ratio;
573
574         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
575
576         inactive = present_pages[0];
577         active = present_pages[1];
578
579         if (inactive * inactive_ratio < active)
580                 return 1;
581
582         return 0;
583 }
584
585 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
586 {
587         unsigned long active;
588         unsigned long inactive;
589
590         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
591         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
592
593         return (active > inactive);
594 }
595
596 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
597                                        struct zone *zone,
598                                        enum lru_list lru)
599 {
600         int nid = zone->zone_pgdat->node_id;
601         int zid = zone_idx(zone);
602         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
603
604         return MEM_CGROUP_ZSTAT(mz, lru);
605 }
606
607 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
608                                                       struct zone *zone)
609 {
610         int nid = zone->zone_pgdat->node_id;
611         int zid = zone_idx(zone);
612         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
613
614         return &mz->reclaim_stat;
615 }
616
617 struct zone_reclaim_stat *
618 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
619 {
620         struct page_cgroup *pc;
621         struct mem_cgroup_per_zone *mz;
622
623         if (mem_cgroup_disabled())
624                 return NULL;
625
626         pc = lookup_page_cgroup(page);
627         /*
628          * The Used bit is set without atomic ops, but after smp_wmb().
629          * To make pc->mem_cgroup visible as well, insert smp_rmb() here.
630          */
631         smp_rmb();
632         if (!PageCgroupUsed(pc))
633                 return NULL;
634
635         mz = page_cgroup_zoneinfo(pc);
636         if (!mz)
637                 return NULL;
638
639         return &mz->reclaim_stat;
640 }
641
642 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
643                                         struct list_head *dst,
644                                         unsigned long *scanned, int order,
645                                         int mode, struct zone *z,
646                                         struct mem_cgroup *mem_cont,
647                                         int active, int file)
648 {
649         unsigned long nr_taken = 0;
650         struct page *page;
651         unsigned long scan;
652         LIST_HEAD(pc_list);
653         struct list_head *src;
654         struct page_cgroup *pc, *tmp;
655         int nid = z->zone_pgdat->node_id;
656         int zid = zone_idx(z);
657         struct mem_cgroup_per_zone *mz;
658         int lru = LRU_FILE * file + active;
659         int ret;
660
661         BUG_ON(!mem_cont);
662         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
663         src = &mz->lists[lru];
664
665         scan = 0;
666         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
667                 if (scan >= nr_to_scan)
668                         break;
669
670                 page = pc->page;
671                 if (unlikely(!PageCgroupUsed(pc)))
672                         continue;
673                 if (unlikely(!PageLRU(page)))
674                         continue;
675
676                 scan++;
677                 ret = __isolate_lru_page(page, mode, file);
678                 switch (ret) {
679                 case 0:
680                         list_move(&page->lru, dst);
681                         mem_cgroup_del_lru(page);
682                         nr_taken++;
683                         break;
684                 case -EBUSY:
685                         /* we don't affect global LRU but rotate in our LRU */
686                         mem_cgroup_rotate_lru_list(page, page_lru(page));
687                         break;
688                 default:
689                         break;
690                 }
691         }
692
693         *scanned = scan;
694         return nr_taken;
695 }
696
697 #define mem_cgroup_from_res_counter(counter, member)    \
698         container_of(counter, struct mem_cgroup, member)
699
700 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
701 {
702         if (do_swap_account) {
703                 if (res_counter_check_under_limit(&mem->res) &&
704                         res_counter_check_under_limit(&mem->memsw))
705                         return true;
706         } else
707                 if (res_counter_check_under_limit(&mem->res))
708                         return true;
709         return false;
710 }
711
712 static unsigned int get_swappiness(struct mem_cgroup *memcg)
713 {
714         struct cgroup *cgrp = memcg->css.cgroup;
715         unsigned int swappiness;
716
717         /* root ? */
718         if (cgrp->parent == NULL)
719                 return vm_swappiness;
720
721         spin_lock(&memcg->reclaim_param_lock);
722         swappiness = memcg->swappiness;
723         spin_unlock(&memcg->reclaim_param_lock);
724
725         return swappiness;
726 }
727
728 static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
729 {
730         int *val = data;
731         (*val)++;
732         return 0;
733 }
734
735 /**
736  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
737  * @memcg: The memory cgroup that went over limit
738  * @p: Task that is going to be killed
739  *
740  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
741  * enabled
742  */
743 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
744 {
745         struct cgroup *task_cgrp;
746         struct cgroup *mem_cgrp;
747         /*
748          * Need a buffer in BSS, can't rely on allocations. The code relies
749          * on the assumption that OOM is serialized for memory controller.
750          * If this assumption is broken, revisit this code.
751          */
752         static char memcg_name[PATH_MAX];
753         int ret;
754
755         if (!memcg)
756                 return;
757
758
759         rcu_read_lock();
760
761         mem_cgrp = memcg->css.cgroup;
762         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
763
764         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
765         if (ret < 0) {
766                 /*
767                  * Unfortunately, we are unable to convert to a useful name,
768                  * but we'll still print out the usage information.
769                  */
770                 rcu_read_unlock();
771                 goto done;
772         }
773         rcu_read_unlock();
774
775         printk(KERN_INFO "Task in %s killed", memcg_name);
776
777         rcu_read_lock();
778         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
779         if (ret < 0) {
780                 rcu_read_unlock();
781                 goto done;
782         }
783         rcu_read_unlock();
784
785         /*
786          * Continues from above, so we don't need a KERN_ level.
787          */
788         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
789 done:
790
791         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
792                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
793                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
794                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
795         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
796                 "failcnt %llu\n",
797                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
798                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
799                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
800 }
801
802 /*
803  * This function returns the number of memcgs under the hierarchy tree.
804  * Returns 1 (the count of self) if there are no children.
805  */
806 static int mem_cgroup_count_children(struct mem_cgroup *mem)
807 {
808         int num = 0;
809         mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
810         return num;
811 }
812
813 /*
814  * Visit the first child (need not be the first child as per the ordering
815  * of the cgroup list, since we track last_scanned_child) of @mem and use
816  * that to reclaim free pages from.
817  */
818 static struct mem_cgroup *
819 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
820 {
821         struct mem_cgroup *ret = NULL;
822         struct cgroup_subsys_state *css;
823         int nextid, found;
824
825         if (!root_mem->use_hierarchy) {
826                 css_get(&root_mem->css);
827                 ret = root_mem;
828         }
829
830         while (!ret) {
831                 rcu_read_lock();
832                 nextid = root_mem->last_scanned_child + 1;
833                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
834                                    &found);
835                 if (css && css_tryget(css))
836                         ret = container_of(css, struct mem_cgroup, css);
837
838                 rcu_read_unlock();
839                 /* Updates scanning parameter */
840                 spin_lock(&root_mem->reclaim_param_lock);
841                 if (!css) {
842                         /* this means start scan from ID:1 */
843                         root_mem->last_scanned_child = 0;
844                 } else
845                         root_mem->last_scanned_child = found;
846                 spin_unlock(&root_mem->reclaim_param_lock);
847         }
848
849         return ret;
850 }
851
852 /*
853  * Scan the hierarchy if needed to reclaim memory. We remember the last child
854  * we reclaimed from, so that we don't end up penalizing one child extensively
855  * based on its position in the children list.
856  *
857  * root_mem is the original ancestor that we've been reclaiming from.
858  *
859  * We give up and return to the caller when we visit root_mem twice.
860  * (Other groups can be removed while we're walking.)
861  *
862  * If shrink==true, this returns immediately, to avoid freeing too much.
863  */
864 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
865                                    gfp_t gfp_mask, bool noswap, bool shrink)
866 {
867         struct mem_cgroup *victim;
868         int ret, total = 0;
869         int loop = 0;
870
871         /* If memsw_is_minimum==1, swap-out is of no use. */
872         if (root_mem->memsw_is_minimum)
873                 noswap = true;
874
875         while (loop < 2) {
876                 victim = mem_cgroup_select_victim(root_mem);
877                 if (victim == root_mem)
878                         loop++;
879                 if (!mem_cgroup_local_usage(&victim->stat)) {
880                         /* this cgroup's local usage == 0 */
881                         css_put(&victim->css);
882                         continue;
883                 }
884                 /* we use swappiness of local cgroup */
885                 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
886                                                    get_swappiness(victim));
887                 css_put(&victim->css);
888                 /*
889                  * When shrinking usage, we can't tell here whether we should
890                  * stop or reclaim more; that depends on the caller.
891                  * last_scanned_child is enough to keep fairness across the tree.
892                  */
893                 if (shrink)
894                         return ret;
895                 total += ret;
896                 if (mem_cgroup_check_under_limit(root_mem))
897                         return 1 + total;
898         }
899         return total;
900 }
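/*
 * Added note (an interpretation, not authoritative): the "1 + total" above
 * keeps the return value non-zero once root_mem is back under its limit,
 * even if this pass itself freed nothing, so callers such as
 * __mem_cgroup_try_charge() can treat any non-zero result as progress.
 */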
901
902 bool mem_cgroup_oom_called(struct task_struct *task)
903 {
904         bool ret = false;
905         struct mem_cgroup *mem;
906         struct mm_struct *mm;
907
908         rcu_read_lock();
909         mm = task->mm;
910         if (!mm)
911                 mm = &init_mm;
912         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
913         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
914                 ret = true;
915         rcu_read_unlock();
916         return ret;
917 }
918
919 static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
920 {
921         mem->last_oom_jiffies = jiffies;
922         return 0;
923 }
924
925 static void record_last_oom(struct mem_cgroup *mem)
926 {
927         mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
928 }
929
930 /*
931  * Currently used to update mapped file statistics, but the routine can be
932  * generalized to update other statistics as well.
933  */
934 void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
935 {
936         struct mem_cgroup *mem;
937         struct mem_cgroup_stat *stat;
938         struct mem_cgroup_stat_cpu *cpustat;
939         int cpu;
940         struct page_cgroup *pc;
941
942         if (!page_is_file_cache(page))
943                 return;
944
945         pc = lookup_page_cgroup(page);
946         if (unlikely(!pc))
947                 return;
948
949         lock_page_cgroup(pc);
950         mem = pc->mem_cgroup;
951         if (!mem)
952                 goto done;
953
954         if (!PageCgroupUsed(pc))
955                 goto done;
956
957         /*
958          * Preemption is already disabled, we don't need get_cpu()
959          */
960         cpu = smp_processor_id();
961         stat = &mem->stat;
962         cpustat = &stat->cpustat[cpu];
963
964         __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
965 done:
966         unlock_page_cgroup(pc);
967 }
968
969 /*
970  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
971  * the oom-killer can be invoked.
972  */
973 static int __mem_cgroup_try_charge(struct mm_struct *mm,
974                         gfp_t gfp_mask, struct mem_cgroup **memcg,
975                         bool oom)
976 {
977         struct mem_cgroup *mem, *mem_over_limit;
978         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
979         struct res_counter *fail_res;
980
981         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
982                 /* Don't account this! */
983                 *memcg = NULL;
984                 return 0;
985         }
986
987         /*
988          * We always charge the cgroup the mm_struct belongs to.
989          * The mm_struct's mem_cgroup changes on task migration if the
990          * thread group leader migrates. It's possible that mm is not
991          * set, if so charge the init_mm (happens for pagecache usage).
992          */
993         mem = *memcg;
994         if (likely(!mem)) {
995                 mem = try_get_mem_cgroup_from_mm(mm);
996                 *memcg = mem;
997         } else {
998                 css_get(&mem->css);
999         }
1000         if (unlikely(!mem))
1001                 return 0;
1002
1003         VM_BUG_ON(css_is_removed(&mem->css));
1004
1005         while (1) {
1006                 int ret;
1007                 bool noswap = false;
1008
1009                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
1010                 if (likely(!ret)) {
1011                         if (!do_swap_account)
1012                                 break;
1013                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
1014                                                         &fail_res);
1015                         if (likely(!ret))
1016                                 break;
1017                         /* mem+swap counter fails */
1018                         res_counter_uncharge(&mem->res, PAGE_SIZE);
1019                         noswap = true;
1020                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1021                                                                         memsw);
1022                 } else
1023                         /* mem counter fails */
1024                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1025                                                                         res);
1026
1027                 if (!(gfp_mask & __GFP_WAIT))
1028                         goto nomem;
1029
1030                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
1031                                                         noswap, false);
1032                 if (ret)
1033                         continue;
1034
1035                 /*
1036                  * try_to_free_mem_cgroup_pages() might not give us a full
1037                  * picture of reclaim. Some pages are reclaimed and might be
1038                  * moved to swap cache or just unmapped from the cgroup.
1039                  * Check the limit again to see if the reclaim reduced the
1040                  * current usage of the cgroup before giving up
1041                  * current usage of the cgroup before giving up.
1042                  */
1043                 if (mem_cgroup_check_under_limit(mem_over_limit))
1044                         continue;
1045
1046                 if (!nr_retries--) {
1047                         if (oom) {
1048                                 mutex_lock(&memcg_tasklist);
1049                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
1050                                 mutex_unlock(&memcg_tasklist);
1051                                 record_last_oom(mem_over_limit);
1052                         }
1053                         goto nomem;
1054                 }
1055         }
1056         return 0;
1057 nomem:
1058         css_put(&mem->css);
1059         return -ENOMEM;
1060 }
1061
1062
1063 /*
1064  * A helper function to get a mem_cgroup from an ID. Must be called under
1065  * rcu_read_lock(). The caller must check css_is_removed() or similar if it
1066  * is a concern. (Dropping a refcnt from swap can be called against an
1067  * already-removed memcg.)
1068  */
1069 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1070 {
1071         struct cgroup_subsys_state *css;
1072
1073         /* ID 0 is unused ID */
1074         if (!id)
1075                 return NULL;
1076         css = css_lookup(&mem_cgroup_subsys, id);
1077         if (!css)
1078                 return NULL;
1079         return container_of(css, struct mem_cgroup, css);
1080 }
1081
1082 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
1083 {
1084         struct mem_cgroup *mem;
1085         struct page_cgroup *pc;
1086         unsigned short id;
1087         swp_entry_t ent;
1088
1089         VM_BUG_ON(!PageLocked(page));
1090
1091         if (!PageSwapCache(page))
1092                 return NULL;
1093
1094         pc = lookup_page_cgroup(page);
1095         lock_page_cgroup(pc);
1096         if (PageCgroupUsed(pc)) {
1097                 mem = pc->mem_cgroup;
1098                 if (mem && !css_tryget(&mem->css))
1099                         mem = NULL;
1100         } else {
1101                 ent.val = page_private(page);
1102                 id = lookup_swap_cgroup(ent);
1103                 rcu_read_lock();
1104                 mem = mem_cgroup_lookup(id);
1105                 if (mem && !css_tryget(&mem->css))
1106                         mem = NULL;
1107                 rcu_read_unlock();
1108         }
1109         unlock_page_cgroup(pc);
1110         return mem;
1111 }
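/*
 * Added note: two lookup paths above. If the swapcache page is still charged
 * (the Used bit is set), pc->mem_cgroup is authoritative; otherwise the
 * charge has moved to swap and the memcg is recovered from the swap_cgroup
 * record by css id. Either way css_tryget() may fail against a memcg that
 * is being removed, in which case NULL is returned.
 */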
1112
1113 /*
1114  * Commit a charge obtained by __mem_cgroup_try_charge() and move the
1115  * page_cgroup to the USED state. If it is already USED, uncharge and return.
1116  */
1117
1118 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1119                                      struct page_cgroup *pc,
1120                                      enum charge_type ctype)
1121 {
1122         /* try_charge() can store NULL in *memcg; handle that case here. */
1123         if (!mem)
1124                 return;
1125
1126         lock_page_cgroup(pc);
1127         if (unlikely(PageCgroupUsed(pc))) {
1128                 unlock_page_cgroup(pc);
1129                 res_counter_uncharge(&mem->res, PAGE_SIZE);
1130                 if (do_swap_account)
1131                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1132                 css_put(&mem->css);
1133                 return;
1134         }
1135
1136         pc->mem_cgroup = mem;
1137         /*
1138          * We access a page_cgroup asynchronously without lock_page_cgroup().
1139          * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
1140          * is accessed after testing the USED bit. To make pc->mem_cgroup visible
1141          * before the USED bit, we need a memory barrier here.
1142          * See mem_cgroup_add_lru_list(), etc.
1143          */
1144         smp_wmb();
1145         switch (ctype) {
1146         case MEM_CGROUP_CHARGE_TYPE_CACHE:
1147         case MEM_CGROUP_CHARGE_TYPE_SHMEM:
1148                 SetPageCgroupCache(pc);
1149                 SetPageCgroupUsed(pc);
1150                 break;
1151         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1152                 ClearPageCgroupCache(pc);
1153                 SetPageCgroupUsed(pc);
1154                 break;
1155         default:
1156                 break;
1157         }
1158
1159         mem_cgroup_charge_statistics(mem, pc, true);
1160
1161         unlock_page_cgroup(pc);
1162 }
1163
1164 /**
1165  * mem_cgroup_move_account - move account of the page
1166  * @pc: page_cgroup of the page.
1167  * @from: mem_cgroup which the page is moved from.
1168  * @to: mem_cgroup which the page is moved to. @from != @to.
1169  *
1170  * The caller must confirm the following:
1171  * - the page is not on the LRU (isolate_page() is useful.)
1172  *
1173  * Returns 0 on success,
1174  * returns -EBUSY when the lock is busy or "pc" is unstable.
1175  *
1176  * This function does the "uncharge" from the old cgroup but does not do the
1177  * "charge" to the new cgroup; that must be done by the caller.
1178  */
1179
1180 static int mem_cgroup_move_account(struct page_cgroup *pc,
1181         struct mem_cgroup *from, struct mem_cgroup *to)
1182 {
1183         struct mem_cgroup_per_zone *from_mz, *to_mz;
1184         int nid, zid;
1185         int ret = -EBUSY;
1186         struct page *page;
1187         int cpu;
1188         struct mem_cgroup_stat *stat;
1189         struct mem_cgroup_stat_cpu *cpustat;
1190
1191         VM_BUG_ON(from == to);
1192         VM_BUG_ON(PageLRU(pc->page));
1193
1194         nid = page_cgroup_nid(pc);
1195         zid = page_cgroup_zid(pc);
1196         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
1197         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
1198
1199         if (!trylock_page_cgroup(pc))
1200                 return ret;
1201
1202         if (!PageCgroupUsed(pc))
1203                 goto out;
1204
1205         if (pc->mem_cgroup != from)
1206                 goto out;
1207
1208         res_counter_uncharge(&from->res, PAGE_SIZE);
1209         mem_cgroup_charge_statistics(from, pc, false);
1210
1211         page = pc->page;
1212         if (page_is_file_cache(page) && page_mapped(page)) {
1213                 cpu = smp_processor_id();
1214                 /* Update mapped_file data for mem_cgroup "from" */
1215                 stat = &from->stat;
1216                 cpustat = &stat->cpustat[cpu];
1217                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
1218                                                 -1);
1219
1220                 /* Update mapped_file data for mem_cgroup "to" */
1221                 stat = &to->stat;
1222                 cpustat = &stat->cpustat[cpu];
1223                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
1224                                                 1);
1225         }
1226
1227         if (do_swap_account)
1228                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
1229         css_put(&from->css);
1230
1231         css_get(&to->css);
1232         pc->mem_cgroup = to;
1233         mem_cgroup_charge_statistics(to, pc, true);
1234         ret = 0;
1235 out:
1236         unlock_page_cgroup(pc);
1237         /*
1238          * We charge against "to", which may not have any tasks; "to" can
1239          * then be under rmdir(). But in the current implementation, the only
1240          * caller of this function is force_empty(), and it is guaranteed
1241          * that "to" is never removed. So we don't check the rmdir status here.
1242          */
1243         return ret;
1244 }
1245
1246 /*
1247  * move charges to its parent.
1248  */
1249
1250 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1251                                   struct mem_cgroup *child,
1252                                   gfp_t gfp_mask)
1253 {
1254         struct page *page = pc->page;
1255         struct cgroup *cg = child->css.cgroup;
1256         struct cgroup *pcg = cg->parent;
1257         struct mem_cgroup *parent;
1258         int ret;
1259
1260         /* Is ROOT ? */
1261         if (!pcg)
1262                 return -EINVAL;
1263
1264
1265         parent = mem_cgroup_from_cont(pcg);
1266
1267
1268         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1269         if (ret || !parent)
1270                 return ret;
1271
1272         if (!get_page_unless_zero(page)) {
1273                 ret = -EBUSY;
1274                 goto uncharge;
1275         }
1276
1277         ret = isolate_lru_page(page);
1278
1279         if (ret)
1280                 goto cancel;
1281
1282         ret = mem_cgroup_move_account(pc, child, parent);
1283
1284         putback_lru_page(page);
1285         if (!ret) {
1286                 put_page(page);
1287                 /* drop extra refcnt by try_charge() */
1288                 css_put(&parent->css);
1289                 return 0;
1290         }
1291
1292 cancel:
1293         put_page(page);
1294 uncharge:
1295         /* drop extra refcnt by try_charge() */
1296         css_put(&parent->css);
1297         /* uncharge if move fails */
1298         res_counter_uncharge(&parent->res, PAGE_SIZE);
1299         if (do_swap_account)
1300                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1301         return ret;
1302 }
1303
1304 /*
1305  * Charge the memory controller for page usage.
1306  * Return
1307  * 0 if the charge was successful
1308  * < 0 if the cgroup is over its limit
1309  */
1310 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1311                                 gfp_t gfp_mask, enum charge_type ctype,
1312                                 struct mem_cgroup *memcg)
1313 {
1314         struct mem_cgroup *mem;
1315         struct page_cgroup *pc;
1316         int ret;
1317
1318         pc = lookup_page_cgroup(page);
1319         /* can happen at boot */
1320         if (unlikely(!pc))
1321                 return 0;
1322         prefetchw(pc);
1323
1324         mem = memcg;
1325         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1326         if (ret || !mem)
1327                 return ret;
1328
1329         __mem_cgroup_commit_charge(mem, pc, ctype);
1330         return 0;
1331 }
1332
1333 int mem_cgroup_newpage_charge(struct page *page,
1334                               struct mm_struct *mm, gfp_t gfp_mask)
1335 {
1336         if (mem_cgroup_disabled())
1337                 return 0;
1338         if (PageCompound(page))
1339                 return 0;
1340         /*
1341          * If the page is already mapped, we don't have to account for it.
1342          * For page cache, page->mapping points to an address_space.
1343          * But page->mapping may hold a stale anon_vma pointer;
1344          * detect that with a PageAnon() check. A newly mapped anon page's
1345          * page->mapping is NULL.
1346          */
1347         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1348                 return 0;
1349         if (unlikely(!mm))
1350                 mm = &init_mm;
1351         return mem_cgroup_charge_common(page, mm, gfp_mask,
1352                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1353 }
1354
1355 static void
1356 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1357                                         enum charge_type ctype);
1358
1359 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1360                                 gfp_t gfp_mask)
1361 {
1362         struct mem_cgroup *mem = NULL;
1363         int ret;
1364
1365         if (mem_cgroup_disabled())
1366                 return 0;
1367         if (PageCompound(page))
1368                 return 0;
1369         /*
1370          * Corner case handling. This is usually called from
1371          * add_to_page_cache(), but some filesystems (shmem) precharge the page
1372          * before calling it, and then call add_to_page_cache() with GFP_NOWAIT.
1373          *
1374          * In the GFP_NOWAIT case, the page may have been pre-charged before
1375          * add_to_page_cache() was called (see shmem.c); check that here and
1376          * avoid charging twice. (It works, but at a slightly higher cost.)
1377          * And when the page is SwapCache, the swap information should be taken
1378          * into account. This now runs under lock_page().
1379          */
1380         if (!(gfp_mask & __GFP_WAIT)) {
1381                 struct page_cgroup *pc;
1382
1383
1384                 pc = lookup_page_cgroup(page);
1385                 if (!pc)
1386                         return 0;
1387                 lock_page_cgroup(pc);
1388                 if (PageCgroupUsed(pc)) {
1389                         unlock_page_cgroup(pc);
1390                         return 0;
1391                 }
1392                 unlock_page_cgroup(pc);
1393         }
1394
1395         if (unlikely(!mm && !mem))
1396                 mm = &init_mm;
1397
1398         if (page_is_file_cache(page))
1399                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1400                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1401
1402         /* shmem */
1403         if (PageSwapCache(page)) {
1404                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
1405                 if (!ret)
1406                         __mem_cgroup_commit_charge_swapin(page, mem,
1407                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
1408         } else
1409                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
1410                                         MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1411
1412         return ret;
1413 }
1414
1415 /*
1416  * During swap-in (try_charge -> commit or cancel), the page is locked.
1417  * When try_charge() returns successfully, one refcnt on the memcg (without
1418  * a struct page_cgroup) has been acquired. This refcnt will be consumed by
1419  * "commit()" or dropped by "cancel()".
1420  */
1421 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1422                                  struct page *page,
1423                                  gfp_t mask, struct mem_cgroup **ptr)
1424 {
1425         struct mem_cgroup *mem;
1426         int ret;
1427
1428         if (mem_cgroup_disabled())
1429                 return 0;
1430
1431         if (!do_swap_account)
1432                 goto charge_cur_mm;
1433         /*
1434          * A racing thread's fault, or swapoff, may have already updated
1435          * the pte, and even removed page from swap cache: return success
1436          * to go on to do_swap_page()'s pte_same() test, which should fail.
1437          */
1438         if (!PageSwapCache(page))
1439                 return 0;
1440         mem = try_get_mem_cgroup_from_swapcache(page);
1441         if (!mem)
1442                 goto charge_cur_mm;
1443         *ptr = mem;
1444         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
1445         /* drop extra refcnt from tryget */
1446         css_put(&mem->css);
1447         return ret;
1448 charge_cur_mm:
1449         if (unlikely(!mm))
1450                 mm = &init_mm;
1451         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1452 }
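/*
 * Caller-side sketch (not part of the original file; a simplified view of
 * how do_swap_page() is expected to drive the protocol above):
 */
#if 0	/* example only */
	struct mem_cgroup *ptr = NULL;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		goto out_of_memory;	/* charge failed, nothing to undo */
	/* ... map the page; on success: */
	mem_cgroup_commit_charge_swapin(page, ptr);	/* consumes the refcnt */
	/* ... or, if the pte_same() check failed: */
	mem_cgroup_cancel_charge_swapin(ptr);		/* uncharges + drops refcnt */
#endif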
1453
1454 static void
1455 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1456                                         enum charge_type ctype)
1457 {
1458         struct page_cgroup *pc;
1459
1460         if (mem_cgroup_disabled())
1461                 return;
1462         if (!ptr)
1463                 return;
1464         cgroup_exclude_rmdir(&ptr->css);
1465         pc = lookup_page_cgroup(page);
1466         mem_cgroup_lru_del_before_commit_swapcache(page);
1467         __mem_cgroup_commit_charge(ptr, pc, ctype);
1468         mem_cgroup_lru_add_after_commit_swapcache(page);
1469         /*
1470          * Now the swap is in memory. This means the page may be
1471          * counted both as mem and swap: a double count.
1472          * Fix it by uncharging from memsw. This SwapCache is basically stable
1473          * under lock_page(), but reuse_swap_page() in do_swap_page() (memory.c)
1474          * may call delete_from_swap_cache() before we reach here.
1475          */
1476         if (do_swap_account && PageSwapCache(page)) {
1477                 swp_entry_t ent = {.val = page_private(page)};
1478                 unsigned short id;
1479                 struct mem_cgroup *memcg;
1480
1481                 id = swap_cgroup_record(ent, 0);
1482                 rcu_read_lock();
1483                 memcg = mem_cgroup_lookup(id);
1484                 if (memcg) {
1485                         /*
1486                          * The recorded memcg may be an obsolete one, so avoid
1487                          * calling css_tryget().
1488                          */
1489                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1490                         mem_cgroup_put(memcg);
1491                 }
1492                 rcu_read_unlock();
1493         }
1494         /*
1495          * At swapin, we may charge against a cgroup which has no tasks, so
1496          * rmdir()->pre_destroy() can be called while we do this charge.
1497          * In that case, we need to call pre_destroy() again; check it here.
1498          */
1499         cgroup_release_and_wakeup_rmdir(&ptr->css);
1500 }
1501
1502 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1503 {
1504         __mem_cgroup_commit_charge_swapin(page, ptr,
1505                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
1506 }
1507
1508 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1509 {
1510         if (mem_cgroup_disabled())
1511                 return;
1512         if (!mem)
1513                 return;
1514         res_counter_uncharge(&mem->res, PAGE_SIZE);
1515         if (do_swap_account)
1516                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1517         css_put(&mem->css);
1518 }
1519
1520
1521 /*
1522  * uncharge if !page_mapped(page)
1523  */
1524 static struct mem_cgroup *
1525 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1526 {
1527         struct page_cgroup *pc;
1528         struct mem_cgroup *mem = NULL;
1529         struct mem_cgroup_per_zone *mz;
1530
1531         if (mem_cgroup_disabled())
1532                 return NULL;
1533
1534         if (PageSwapCache(page))
1535                 return NULL;
1536
1537         /*
1538          * Check if our page_cgroup is valid
1539          */
1540         pc = lookup_page_cgroup(page);
1541         if (unlikely(!pc || !PageCgroupUsed(pc)))
1542                 return NULL;
1543
1544         lock_page_cgroup(pc);
1545
1546         mem = pc->mem_cgroup;
1547
1548         if (!PageCgroupUsed(pc))
1549                 goto unlock_out;
1550
1551         switch (ctype) {
1552         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1553         case MEM_CGROUP_CHARGE_TYPE_DROP:
1554                 if (page_mapped(page))
1555                         goto unlock_out;
1556                 break;
1557         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1558                 if (!PageAnon(page)) {  /* Shared memory */
1559                         if (page->mapping && !page_is_file_cache(page))
1560                                 goto unlock_out;
1561                 } else if (page_mapped(page)) /* Anon */
1562                                 goto unlock_out;
1563                 break;
1564         default:
1565                 break;
1566         }
1567
1568         res_counter_uncharge(&mem->res, PAGE_SIZE);
1569         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1570                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1571         mem_cgroup_charge_statistics(mem, pc, false);
1572
1573         ClearPageCgroupUsed(pc);
1574         /*
1575          * pc->mem_cgroup is not cleared here. It will be accessed when the
1576          * page is freed from the LRU. This is safe because an uncharged page
1577          * is not expected to be reused (it will be freed soon). The exception
1578          * is SwapCache, which is handled by special functions.
1579          */
1580
1581         mz = page_cgroup_zoneinfo(pc);
1582         unlock_page_cgroup(pc);
1583
1584         /* at swapout, this memcg will still be accessed to record the swap entry */
1585         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1586                 css_put(&mem->css);
1587
1588         return mem;
1589
1590 unlock_out:
1591         unlock_page_cgroup(pc);
1592         return NULL;
1593 }
1594
1595 void mem_cgroup_uncharge_page(struct page *page)
1596 {
1597         /* early check. */
1598         if (page_mapped(page))
1599                 return;
1600         if (page->mapping && !PageAnon(page))
1601                 return;
1602         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1603 }
1604
1605 void mem_cgroup_uncharge_cache_page(struct page *page)
1606 {
1607         VM_BUG_ON(page_mapped(page));
1608         VM_BUG_ON(page->mapping);
1609         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1610 }
1611
1612 #ifdef CONFIG_SWAP
1613 /*
1614  * called after __delete_from_swap_cache(); drops the "page" account.
1615  * The memcg information is recorded in the swap_cgroup of "ent".
1616  */
1617 void
1618 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
1619 {
1620         struct mem_cgroup *memcg;
1621         int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
1622
1623         if (!swapout) /* this was a swap cache but the swap is unused ! */
1624                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
1625
1626         memcg = __mem_cgroup_uncharge_common(page, ctype);
1627
1628         /* record memcg information */
1629         if (do_swap_account && swapout && memcg) {
1630                 swap_cgroup_record(ent, css_id(&memcg->css));
1631                 mem_cgroup_get(memcg);
1632         }
1633         if (swapout && memcg)
1634                 css_put(&memcg->css);
1635 }
1636 #endif
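/*
 * Rough lifecycle of a swap_cgroup record, pieced together from the
 * helpers above and below:
 *
 *	swapout:	mem_cgroup_uncharge_swapcache(page, ent, true)
 *			  -> swap_cgroup_record(ent, css_id) + mem_cgroup_get()
 *	swap free:	mem_cgroup_uncharge_swap(ent)
 *			  -> swap_cgroup_record(ent, 0), uncharge memsw,
 *			     mem_cgroup_put()
 *
 * The get/put pair keeps the mem_cgroup structure alive while a swap
 * entry still refers to it, even after the cgroup directory is removed.
 */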
1637
1638 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1639 /*
1640  * called from swap_entry_free(); removes the record in swap_cgroup and
1641  * uncharges the "memsw" account.
1642  */
1643 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1644 {
1645         struct mem_cgroup *memcg;
1646         unsigned short id;
1647
1648         if (!do_swap_account)
1649                 return;
1650
1651         id = swap_cgroup_record(ent, 0);
1652         rcu_read_lock();
1653         memcg = mem_cgroup_lookup(id);
1654         if (memcg) {
1655                 /*
1656                  * We uncharge this because the swap entry is freed.
1657                  * This memcg may be an obsolete one, so we avoid calling css_tryget().
1658                  */
1659                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1660                 mem_cgroup_put(memcg);
1661         }
1662         rcu_read_unlock();
1663 }
1664 #endif
1665
1666 /*
1667  * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
1668  * old page belongs to.
1669  */
1670 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1671 {
1672         struct page_cgroup *pc;
1673         struct mem_cgroup *mem = NULL;
1674         int ret = 0;
1675
1676         if (mem_cgroup_disabled())
1677                 return 0;
1678
1679         pc = lookup_page_cgroup(page);
1680         lock_page_cgroup(pc);
1681         if (PageCgroupUsed(pc)) {
1682                 mem = pc->mem_cgroup;
1683                 css_get(&mem->css);
1684         }
1685         unlock_page_cgroup(pc);
1686
1687         if (mem) {
1688                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
1689                 css_put(&mem->css);
1690         }
1691         *ptr = mem;
1692         return ret;
1693 }
1694
1695 /* remove the redundant charge if migration failed */
1696 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1697                 struct page *oldpage, struct page *newpage)
1698 {
1699         struct page *target, *unused;
1700         struct page_cgroup *pc;
1701         enum charge_type ctype;
1702
1703         if (!mem)
1704                 return;
1705         cgroup_exclude_rmdir(&mem->css);
1706         /* on migration success, oldpage->mapping is NULL. */
1707         if (oldpage->mapping) {
1708                 target = oldpage;
1709                 unused = NULL;
1710         } else {
1711                 target = newpage;
1712                 unused = oldpage;
1713         }
1714
1715         if (PageAnon(target))
1716                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1717         else if (page_is_file_cache(target))
1718                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1719         else
1720                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1721
1722         /* the unused page is no longer on the radix-tree. */
1723         if (unused)
1724                 __mem_cgroup_uncharge_common(unused, ctype);
1725
1726         pc = lookup_page_cgroup(target);
1727         /*
1728          * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
1729          * So, double-counting is effectively avoided.
1730          */
1731         __mem_cgroup_commit_charge(mem, pc, ctype);
1732
1733         /*
1734          * Both oldpage and newpage are still under lock_page(), so we
1735          * don't have to care about races in the radix-tree.
1736          * But we do have to check whether the page is mapped or not.
1737          *
1738          * !page_mapped() can happen: at the start of migration, oldpage
1739          * was mapped, but by now it may have been zapped. We do know the
1740          * *target* page is not freed/reused under us, and
1741          * mem_cgroup_uncharge_page() does all the necessary checks.
1742          */
1743         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1744                 mem_cgroup_uncharge_page(target);
1745         /*
1746          * At migration, we may charge a cgroup which has no tasks, so
1747          * rmdir()->pre_destroy() can be called while we do this charge.
1748          * In that case, we need to call pre_destroy() again; check it here.
1749          */
1750         cgroup_release_and_wakeup_rmdir(&mem->css);
1751 }
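/*
 * A rough sketch of the migration protocol as seen from a caller in
 * mm/migrate.c (simplified; error handling omitted):
 *
 *	struct mem_cgroup *mem;
 *
 *	ret = mem_cgroup_prepare_migration(oldpage, &mem);
 *	... unmap oldpage, copy its contents to newpage ...
 *	mem_cgroup_end_migration(mem, oldpage, newpage);
 *
 * prepare() pre-charges PAGE_SIZE against the old page's memcg, and
 * end() commits that charge to whichever page survived while
 * uncharging the unused one.
 */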
1752
1753 /*
1754  * A call to try to shrink memory usage on charge failure at shmem's swapin.
1755  * Calling hierarchical_reclaim is not enough because we should update
1756  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
1757  * Moreover, considering hierarchy, we should reclaim from the mem_over_limit,
1758  * not from the memcg which this page would be charged to.
1759  * try_charge_swapin does all of this work properly.
1760  */
1761 int mem_cgroup_shmem_charge_fallback(struct page *page,
1762                             struct mm_struct *mm,
1763                             gfp_t gfp_mask)
1764 {
1765         struct mem_cgroup *mem = NULL;
1766         int ret;
1767
1768         if (mem_cgroup_disabled())
1769                 return 0;
1770
1771         ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
1772         if (!ret)
1773                 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
1774
1775         return ret;
1776 }
1777
1778 static DEFINE_MUTEX(set_limit_mutex);
1779
1780 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1781                                 unsigned long long val)
1782 {
1783         int retry_count;
1784         int progress;
1785         u64 memswlimit;
1786         int ret = 0;
1787         int children = mem_cgroup_count_children(memcg);
1788         u64 curusage, oldusage;
1789
1790         /*
1791          * To keep hierarchical_reclaim simple, how long we should retry
1792          * depends on the caller. We set our retry-count to be a function
1793          * of the # of children which we should visit in this loop.
1794          */
1795         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
1796
1797         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1798
1799         while (retry_count) {
1800                 if (signal_pending(current)) {
1801                         ret = -EINTR;
1802                         break;
1803                 }
1804                 /*
1805          * Rather than hiding all of this in some function, do it in an
1806          * open-coded manner so that it is clear what really happens.
1807          * We have to guarantee mem->res.limit <= mem->memsw.limit.
1808                  */
1809                 mutex_lock(&set_limit_mutex);
1810                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1811                 if (memswlimit < val) {
1812                         ret = -EINVAL;
1813                         mutex_unlock(&set_limit_mutex);
1814                         break;
1815                 }
1816                 ret = res_counter_set_limit(&memcg->res, val);
1817                 if (!ret) {
1818                         if (memswlimit == val)
1819                                 memcg->memsw_is_minimum = true;
1820                         else
1821                                 memcg->memsw_is_minimum = false;
1822                 }
1823                 mutex_unlock(&set_limit_mutex);
1824
1825                 if (!ret)
1826                         break;
1827
1828                 progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
1829                                                    false, true);
1830                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1831                 /* Was the usage reduced? */
1832                 if (curusage >= oldusage)
1833                         retry_count--;
1834                 else
1835                         oldusage = curusage;
1836         }
1837
1838         return ret;
1839 }
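/*
 * Resizing is driven from userspace through mem_cgroup_write(), e.g.
 * (the mount point is illustrative):
 *
 *	# echo 64M > /cgroups/memory/grp0/memory.limit_in_bytes
 *	# echo 128M > /cgroups/memory/grp0/memory.memsw.limit_in_bytes
 *
 * The set_limit_mutex dance above and below is what keeps the plain
 * memory limit from exceeding the mem+swap limit.
 */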
1840
1841 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1842                                         unsigned long long val)
1843 {
1844         int retry_count;
1845         u64 memlimit, oldusage, curusage;
1846         int children = mem_cgroup_count_children(memcg);
1847         int ret = -EBUSY;
1848
1849         /* see mem_cgroup_resize_limit */
1850         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
1851         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1852         while (retry_count) {
1853                 if (signal_pending(current)) {
1854                         ret = -EINTR;
1855                         break;
1856                 }
1857                 /*
1858          * Rather than hiding all of this in some function, do it in an
1859          * open-coded manner so that it is clear what really happens.
1860          * We have to guarantee mem->res.limit <= mem->memsw.limit.
1861                  */
1862                 mutex_lock(&set_limit_mutex);
1863                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1864                 if (memlimit > val) {
1865                         ret = -EINVAL;
1866                         mutex_unlock(&set_limit_mutex);
1867                         break;
1868                 }
1869                 ret = res_counter_set_limit(&memcg->memsw, val);
1870                 if (!ret) {
1871                         if (memlimit == val)
1872                                 memcg->memsw_is_minimum = true;
1873                         else
1874                                 memcg->memsw_is_minimum = false;
1875                 }
1876                 mutex_unlock(&set_limit_mutex);
1877
1878                 if (!ret)
1879                         break;
1880
1881                 mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
1882                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1883                 /* Was the usage reduced? */
1884                 if (curusage >= oldusage)
1885                         retry_count--;
1886                 else
1887                         oldusage = curusage;
1888         }
1889         return ret;
1890 }
1891
1892 /*
1893  * This routine traverses the page_cgroups on the given list and drops them
1894  * all. *And* it doesn't reclaim the pages themselves, only their page_cgroups.
1895  */
1896 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1897                                 int node, int zid, enum lru_list lru)
1898 {
1899         struct zone *zone;
1900         struct mem_cgroup_per_zone *mz;
1901         struct page_cgroup *pc, *busy;
1902         unsigned long flags, loop;
1903         struct list_head *list;
1904         int ret = 0;
1905
1906         zone = &NODE_DATA(node)->node_zones[zid];
1907         mz = mem_cgroup_zoneinfo(mem, node, zid);
1908         list = &mz->lists[lru];
1909
1910         loop = MEM_CGROUP_ZSTAT(mz, lru);
1911         /* give some margin against EBUSY etc. */
1912         loop += 256;
1913         busy = NULL;
1914         while (loop--) {
1915                 ret = 0;
1916                 spin_lock_irqsave(&zone->lru_lock, flags);
1917                 if (list_empty(list)) {
1918                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1919                         break;
1920                 }
1921                 pc = list_entry(list->prev, struct page_cgroup, lru);
1922                 if (busy == pc) {
1923                         list_move(&pc->lru, list);
1924                         busy = NULL;
1925                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1926                         continue;
1927                 }
1928                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1929
1930                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1931                 if (ret == -ENOMEM)
1932                         break;
1933
1934                 if (ret == -EBUSY || ret == -EINVAL) {
1935                         /* found lock contention or "pc" is obsolete. */
1936                         busy = pc;
1937                         cond_resched();
1938                 } else
1939                         busy = NULL;
1940         }
1941
1942         if (!ret && !list_empty(list))
1943                 return -EBUSY;
1944         return ret;
1945 }
1946
1947 /*
1948  * make mem_cgroup's charge to be 0 if there is no task.
1949  * This enables deleting this mem_cgroup.
1950  */
1951 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1952 {
1953         int ret;
1954         int node, zid, shrink;
1955         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1956         struct cgroup *cgrp = mem->css.cgroup;
1957
1958         css_get(&mem->css);
1959
1960         shrink = 0;
1961         /* should we free everything? */
1962         if (free_all)
1963                 goto try_to_free;
1964 move_account:
1965         while (mem->res.usage > 0) {
1966                 ret = -EBUSY;
1967                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1968                         goto out;
1969                 ret = -EINTR;
1970                 if (signal_pending(current))
1971                         goto out;
1972                 /* This is for moving all *used* pages onto the LRU. */
1973                 lru_add_drain_all();
1974                 ret = 0;
1975                 for_each_node_state(node, N_HIGH_MEMORY) {
1976                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1977                                 enum lru_list l;
1978                                 for_each_lru(l) {
1979                                         ret = mem_cgroup_force_empty_list(mem,
1980                                                         node, zid, l);
1981                                         if (ret)
1982                                                 break;
1983                                 }
1984                         }
1985                         if (ret)
1986                                 break;
1987                 }
1988                 /* it seems the parent cgroup doesn't have enough memory */
1989                 if (ret == -ENOMEM)
1990                         goto try_to_free;
1991                 cond_resched();
1992         }
1993         ret = 0;
1994 out:
1995         css_put(&mem->css);
1996         return ret;
1997
1998 try_to_free:
1999         /* returns EBUSY if there is a task or if we come here twice. */
2000         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
2001                 ret = -EBUSY;
2002                 goto out;
2003         }
2004         /* we call try-to-free pages to make this cgroup empty */
2005         lru_add_drain_all();
2006         /* try to free all pages in this cgroup */
2007         shrink = 1;
2008         while (nr_retries && mem->res.usage > 0) {
2009                 int progress;
2010
2011                 if (signal_pending(current)) {
2012                         ret = -EINTR;
2013                         goto out;
2014                 }
2015                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
2016                                                 false, get_swappiness(mem));
2017                 if (!progress) {
2018                         nr_retries--;
2019                         /* maybe some writeback is necessary */
2020                         congestion_wait(BLK_RW_ASYNC, HZ/10);
2021                 }
2022
2023         }
2024         lru_add_drain();
2025         /* try move_account...there may be some *locked* pages. */
2026         if (mem->res.usage)
2027                 goto move_account;
2028         ret = 0;
2029         goto out;
2030 }
2031
2032 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
2033 {
2034         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
2035 }
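/*
 * Typical use of force_empty is to drain a cgroup's charges just before
 * removing it (the written value is ignored):
 *
 *	# echo 0 > memory.force_empty
 *	# rmdir /cgroups/memory/grp0
 */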
2036
2037
2038 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
2039 {
2040         return mem_cgroup_from_cont(cont)->use_hierarchy;
2041 }
2042
2043 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
2044                                         u64 val)
2045 {
2046         int retval = 0;
2047         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2048         struct cgroup *parent = cont->parent;
2049         struct mem_cgroup *parent_mem = NULL;
2050
2051         if (parent)
2052                 parent_mem = mem_cgroup_from_cont(parent);
2053
2054         cgroup_lock();
2055         /*
2056          * If the parent's use_hierarchy is set, we can't make any modifications
2057          * in the child subtrees. If it is unset, then the change can
2058          * occur, provided the current cgroup has no children.
2059          *
2060          * For the root cgroup, parent_mem is NULL; we allow the value to
2061          * be set if there are no children.
2062          */
2063         if ((!parent_mem || !parent_mem->use_hierarchy) &&
2064                                 (val == 1 || val == 0)) {
2065                 if (list_empty(&cont->children))
2066                         mem->use_hierarchy = val;
2067                 else
2068                         retval = -EBUSY;
2069         } else
2070                 retval = -EINVAL;
2071         cgroup_unlock();
2072
2073         return retval;
2074 }
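/*
 * Example: the flag can only be changed while the cgroup is childless,
 * and children created afterwards inherit it (paths are illustrative):
 *
 *	# mkdir /cgroups/memory/parent
 *	# echo 1 > /cgroups/memory/parent/memory.use_hierarchy
 *	# mkdir /cgroups/memory/parent/child	(inherits use_hierarchy=1)
 */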
2075
2076 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
2077 {
2078         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2079         u64 val = 0;
2080         int type, name;
2081
2082         type = MEMFILE_TYPE(cft->private);
2083         name = MEMFILE_ATTR(cft->private);
2084         switch (type) {
2085         case _MEM:
2086                 val = res_counter_read_u64(&mem->res, name);
2087                 break;
2088         case _MEMSWAP:
2089                 val = res_counter_read_u64(&mem->memsw, name);
2090                 break;
2091         default:
2092                 BUG();
2093                 break;
2094         }
2095         return val;
2096 }
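/*
 * cft->private packs a resource type (_MEM or _MEMSWAP) together with a
 * res_counter attribute (RES_USAGE, RES_LIMIT, ...); MEMFILE_TYPE() and
 * MEMFILE_ATTR(), defined earlier in this file, unpack the two halves.
 */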
2097 /*
2098  * The user of this function is...
2099  * RES_LIMIT.
2100  */
2101 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
2102                             const char *buffer)
2103 {
2104         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
2105         int type, name;
2106         unsigned long long val;
2107         int ret;
2108
2109         type = MEMFILE_TYPE(cft->private);
2110         name = MEMFILE_ATTR(cft->private);
2111         switch (name) {
2112         case RES_LIMIT:
2113                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2114                         ret = -EINVAL;
2115                         break;
2116                 }
2117                 /* This function does all the necessary parsing...reuse it */
2118                 ret = res_counter_memparse_write_strategy(buffer, &val);
2119                 if (ret)
2120                         break;
2121                 if (type == _MEM)
2122                         ret = mem_cgroup_resize_limit(memcg, val);
2123                 else
2124                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
2125                 break;
2126         default:
2127                 ret = -EINVAL; /* should be BUG() ? */
2128                 break;
2129         }
2130         return ret;
2131 }
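/*
 * res_counter_memparse_write_strategy() accepts memparse()-style
 * strings, so all of the following are valid limits (see
 * kernel/res_counter.c for the authoritative rules):
 *
 *	"4096"			bytes
 *	"64K", "64M", "1G"	scaled units
 *	"-1"			unlimited (RESOURCE_MAX)
 */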
2132
2133 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
2134                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
2135 {
2136         struct cgroup *cgroup;
2137         unsigned long long min_limit, min_memsw_limit, tmp;
2138
2139         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2140         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2141         cgroup = memcg->css.cgroup;
2142         if (!memcg->use_hierarchy)
2143                 goto out;
2144
2145         while (cgroup->parent) {
2146                 cgroup = cgroup->parent;
2147                 memcg = mem_cgroup_from_cont(cgroup);
2148                 if (!memcg->use_hierarchy)
2149                         break;
2150                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
2151                 min_limit = min(min_limit, tmp);
2152                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2153                 min_memsw_limit = min(min_memsw_limit, tmp);
2154         }
2155 out:
2156         *mem_limit = min_limit;
2157         *memsw_limit = min_memsw_limit;
2158         return;
2159 }
2160
2161 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
2162 {
2163         struct mem_cgroup *mem;
2164         int type, name;
2165
2166         mem = mem_cgroup_from_cont(cont);
2167         type = MEMFILE_TYPE(event);
2168         name = MEMFILE_ATTR(event);
2169         switch (name) {
2170         case RES_MAX_USAGE:
2171                 if (type == _MEM)
2172                         res_counter_reset_max(&mem->res);
2173                 else
2174                         res_counter_reset_max(&mem->memsw);
2175                 break;
2176         case RES_FAILCNT:
2177                 if (type == _MEM)
2178                         res_counter_reset_failcnt(&mem->res);
2179                 else
2180                         res_counter_reset_failcnt(&mem->memsw);
2181                 break;
2182         }
2183         return 0;
2184 }
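/*
 * The reset triggers above fire on any write to the corresponding
 * file; the written value is ignored:
 *
 *	# echo 0 > memory.max_usage_in_bytes	(reset high-water mark)
 *	# echo 0 > memory.failcnt		(reset failure counter)
 */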
2185
2186
2187 /* For reading statistics */
2188 enum {
2189         MCS_CACHE,
2190         MCS_RSS,
2191         MCS_MAPPED_FILE,
2192         MCS_PGPGIN,
2193         MCS_PGPGOUT,
2194         MCS_INACTIVE_ANON,
2195         MCS_ACTIVE_ANON,
2196         MCS_INACTIVE_FILE,
2197         MCS_ACTIVE_FILE,
2198         MCS_UNEVICTABLE,
2199         NR_MCS_STAT,
2200 };
2201
2202 struct mcs_total_stat {
2203         s64 stat[NR_MCS_STAT];
2204 };
2205
2206 struct {
2207         char *local_name;
2208         char *total_name;
2209 } memcg_stat_strings[NR_MCS_STAT] = {
2210         {"cache", "total_cache"},
2211         {"rss", "total_rss"},
2212         {"mapped_file", "total_mapped_file"},
2213         {"pgpgin", "total_pgpgin"},
2214         {"pgpgout", "total_pgpgout"},
2215         {"inactive_anon", "total_inactive_anon"},
2216         {"active_anon", "total_active_anon"},
2217         {"inactive_file", "total_inactive_file"},
2218         {"active_file", "total_active_file"},
2219         {"unevictable", "total_unevictable"}
2220 };
2221
2222
2223 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
2224 {
2225         struct mcs_total_stat *s = data;
2226         s64 val;
2227
2228         /* per cpu stat */
2229         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
2230         s->stat[MCS_CACHE] += val * PAGE_SIZE;
2231         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
2232         s->stat[MCS_RSS] += val * PAGE_SIZE;
2233         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
2234         s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
2235         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
2236         s->stat[MCS_PGPGIN] += val;
2237         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
2238         s->stat[MCS_PGPGOUT] += val;
2239
2240         /* per zone stat */
2241         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
2242         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
2243         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
2244         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
2245         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
2246         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
2247         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
2248         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
2249         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
2250         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
2251         return 0;
2252 }
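/*
 * Note on units: the per-cpu statistics are kept in pages, hence the
 * "* PAGE_SIZE" conversions to bytes above, while pgpgin/pgpgout are
 * plain event counters and are reported unscaled.
 */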
2253
2254 static void
2255 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
2256 {
2257         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
2258 }
2259
2260 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
2261                                  struct cgroup_map_cb *cb)
2262 {
2263         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
2264         struct mcs_total_stat mystat;
2265         int i;
2266
2267         memset(&mystat, 0, sizeof(mystat));
2268         mem_cgroup_get_local_stat(mem_cont, &mystat);
2269
2270         for (i = 0; i < NR_MCS_STAT; i++)
2271                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
2272
2273         /* Hierarchical information */
2274         {
2275                 unsigned long long limit, memsw_limit;
2276                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
2277                 cb->fill(cb, "hierarchical_memory_limit", limit);
2278                 if (do_swap_account)
2279                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
2280         }
2281
2282         memset(&mystat, 0, sizeof(mystat));
2283         mem_cgroup_get_total_stat(mem_cont, &mystat);
2284         for (i = 0; i < NR_MCS_STAT; i++)
2285                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
2286
2287
2288 #ifdef CONFIG_DEBUG_VM
2289         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
2290
2291         {
2292                 int nid, zid;
2293                 struct mem_cgroup_per_zone *mz;
2294                 unsigned long recent_rotated[2] = {0, 0};
2295                 unsigned long recent_scanned[2] = {0, 0};
2296
2297                 for_each_online_node(nid)
2298                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2299                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
2300
2301                                 recent_rotated[0] +=
2302                                         mz->reclaim_stat.recent_rotated[0];
2303                                 recent_rotated[1] +=
2304                                         mz->reclaim_stat.recent_rotated[1];
2305                                 recent_scanned[0] +=
2306                                         mz->reclaim_stat.recent_scanned[0];
2307                                 recent_scanned[1] +=
2308                                         mz->reclaim_stat.recent_scanned[1];
2309                         }
2310                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
2311                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
2312                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
2313                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
2314         }
2315 #endif
2316
2317         return 0;
2318 }
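/*
 * The resulting memory.stat output looks roughly like this (values are
 * illustrative):
 *
 *	cache 4096
 *	rss 8192
 *	...
 *	hierarchical_memory_limit 67108864
 *	total_cache 4096
 *	total_rss 8192
 *	...
 */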
2319
2320 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
2321 {
2322         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2323
2324         return get_swappiness(memcg);
2325 }
2326
2327 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
2328                                        u64 val)
2329 {
2330         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2331         struct mem_cgroup *parent;
2332
2333         if (val > 100)
2334                 return -EINVAL;
2335
2336         if (cgrp->parent == NULL)
2337                 return -EINVAL;
2338
2339         parent = mem_cgroup_from_cont(cgrp->parent);
2340
2341         cgroup_lock();
2342
2343         /* If under hierarchy, only the root with no children can set this value */
2344         if ((parent->use_hierarchy) ||
2345             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
2346                 cgroup_unlock();
2347                 return -EINVAL;
2348         }
2349
2350         spin_lock(&memcg->reclaim_param_lock);
2351         memcg->swappiness = val;
2352         spin_unlock(&memcg->reclaim_param_lock);
2353
2354         cgroup_unlock();
2355
2356         return 0;
2357 }
2358
2359
2360 static struct cftype mem_cgroup_files[] = {
2361         {
2362                 .name = "usage_in_bytes",
2363                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2364                 .read_u64 = mem_cgroup_read,
2365         },
2366         {
2367                 .name = "max_usage_in_bytes",
2368                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
2369                 .trigger = mem_cgroup_reset,
2370                 .read_u64 = mem_cgroup_read,
2371         },
2372         {
2373                 .name = "limit_in_bytes",
2374                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
2375                 .write_string = mem_cgroup_write,
2376                 .read_u64 = mem_cgroup_read,
2377         },
2378         {
2379                 .name = "failcnt",
2380                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
2381                 .trigger = mem_cgroup_reset,
2382                 .read_u64 = mem_cgroup_read,
2383         },
2384         {
2385                 .name = "stat",
2386                 .read_map = mem_control_stat_show,
2387         },
2388         {
2389                 .name = "force_empty",
2390                 .trigger = mem_cgroup_force_empty_write,
2391         },
2392         {
2393                 .name = "use_hierarchy",
2394                 .write_u64 = mem_cgroup_hierarchy_write,
2395                 .read_u64 = mem_cgroup_hierarchy_read,
2396         },
2397         {
2398                 .name = "swappiness",
2399                 .read_u64 = mem_cgroup_swappiness_read,
2400                 .write_u64 = mem_cgroup_swappiness_write,
2401         },
2402 };
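/*
 * The cgroup core prefixes each entry with the subsystem name, so the
 * files above appear as memory.usage_in_bytes, memory.stat,
 * memory.force_empty and so on in the mounted cgroup filesystem.
 */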
2403
2404 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2405 static struct cftype memsw_cgroup_files[] = {
2406         {
2407                 .name = "memsw.usage_in_bytes",
2408                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2409                 .read_u64 = mem_cgroup_read,
2410         },
2411         {
2412                 .name = "memsw.max_usage_in_bytes",
2413                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2414                 .trigger = mem_cgroup_reset,
2415                 .read_u64 = mem_cgroup_read,
2416         },
2417         {
2418                 .name = "memsw.limit_in_bytes",
2419                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2420                 .write_string = mem_cgroup_write,
2421                 .read_u64 = mem_cgroup_read,
2422         },
2423         {
2424                 .name = "memsw.failcnt",
2425                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2426                 .trigger = mem_cgroup_reset,
2427                 .read_u64 = mem_cgroup_read,
2428         },
2429 };
2430
2431 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2432 {
2433         if (!do_swap_account)
2434                 return 0;
2435         return cgroup_add_files(cont, ss, memsw_cgroup_files,
2436                                 ARRAY_SIZE(memsw_cgroup_files));
2437 }
2438 #else
2439 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2440 {
2441         return 0;
2442 }
2443 #endif
2444
2445 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2446 {
2447         struct mem_cgroup_per_node *pn;
2448         struct mem_cgroup_per_zone *mz;
2449         enum lru_list l;
2450         int zone, tmp = node;
2451         /*
2452          * This routine is called against possible nodes.
2453          * But it's a BUG to call kmalloc() against an offline node.
2454          *
2455          * TODO: this routine can waste a lot of memory on nodes which will
2456          *       never be onlined. It would be better to use a memory-hotplug
2457          *       callback function.
2458          */
2459         if (!node_state(node, N_NORMAL_MEMORY))
2460                 tmp = -1;
2461         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
2462         if (!pn)
2463                 return 1;
2464
2465         mem->info.nodeinfo[node] = pn;
2466         memset(pn, 0, sizeof(*pn));
2467
2468         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2469                 mz = &pn->zoneinfo[zone];
2470                 for_each_lru(l)
2471                         INIT_LIST_HEAD(&mz->lists[l]);
2472         }
2473         return 0;
2474 }
2475
2476 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2477 {
2478         kfree(mem->info.nodeinfo[node]);
2479 }
2480
2481 static int mem_cgroup_size(void)
2482 {
2483         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2484         return sizeof(struct mem_cgroup) + cpustat_size;
2485 }
2486
2487 static struct mem_cgroup *mem_cgroup_alloc(void)
2488 {
2489         struct mem_cgroup *mem;
2490         int size = mem_cgroup_size();
2491
2492         if (size < PAGE_SIZE)
2493                 mem = kmalloc(size, GFP_KERNEL);
2494         else
2495                 mem = vmalloc(size);
2496
2497         if (mem)
2498                 memset(mem, 0, size);
2499         return mem;
2500 }
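/*
 * The size check above matters because mem_cgroup_size() scales with
 * nr_cpu_ids (one mem_cgroup_stat_cpu per possible cpu): small structs
 * come from the slab, large ones fall back to vmalloc().
 */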
2501
2502 /*
2503  * When destroying a mem_cgroup, references from swap_cgroup can remain
2504  * (scanning everything at force_empty would be too costly...).
2505  *
2506  * Instead of clearing all references at force_empty, we remember
2507  * the number of references from swap_cgroup and free the mem_cgroup when
2508  * it goes down to 0.
2509  *
2510  * Removal of the cgroup itself succeeds regardless of refs from swap.
2511  */
2512
2513 static void __mem_cgroup_free(struct mem_cgroup *mem)
2514 {
2515         int node;
2516
2517         free_css_id(&mem_cgroup_subsys, &mem->css);
2518
2519         for_each_node_state(node, N_POSSIBLE)
2520                 free_mem_cgroup_per_zone_info(mem, node);
2521
2522         if (mem_cgroup_size() < PAGE_SIZE)
2523                 kfree(mem);
2524         else
2525                 vfree(mem);
2526 }
2527
2528 static void mem_cgroup_get(struct mem_cgroup *mem)
2529 {
2530         atomic_inc(&mem->refcnt);
2531 }
2532
2533 static void mem_cgroup_put(struct mem_cgroup *mem)
2534 {
2535         if (atomic_dec_and_test(&mem->refcnt)) {
2536                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
2537                 __mem_cgroup_free(mem);
2538                 if (parent)
2539                         mem_cgroup_put(parent);
2540         }
2541 }
2542
2543 /*
2544  * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
2545  */
2546 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
2547 {
2548         if (!mem->res.parent)
2549                 return NULL;
2550         return mem_cgroup_from_res_counter(mem->res.parent, res);
2551 }
2552
2553 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2554 static void __init enable_swap_cgroup(void)
2555 {
2556         if (!mem_cgroup_disabled() && really_do_swap_account)
2557                 do_swap_account = 1;
2558 }
2559 #else
2560 static void __init enable_swap_cgroup(void)
2561 {
2562 }
2563 #endif
2564
2565 static struct cgroup_subsys_state * __ref
2566 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2567 {
2568         struct mem_cgroup *mem, *parent;
2569         long error = -ENOMEM;
2570         int node;
2571
2572         mem = mem_cgroup_alloc();
2573         if (!mem)
2574                 return ERR_PTR(error);
2575
2576         for_each_node_state(node, N_POSSIBLE)
2577                 if (alloc_mem_cgroup_per_zone_info(mem, node))
2578                         goto free_out;
2579         /* root ? */
2580         if (cont->parent == NULL) {
2581                 enable_swap_cgroup();
2582                 parent = NULL;
2583                 root_mem_cgroup = mem;
2584         } else {
2585                 parent = mem_cgroup_from_cont(cont->parent);
2586                 mem->use_hierarchy = parent->use_hierarchy;
2587         }
2588
2589         if (parent && parent->use_hierarchy) {
2590                 res_counter_init(&mem->res, &parent->res);
2591                 res_counter_init(&mem->memsw, &parent->memsw);
2592                 /*
2593                  * We increment the refcnt of the parent to ensure that we can
2594                  * safely access it on res_counter_charge/uncharge.
2595                  * This refcnt will be decremented when freeing this
2596                  * mem_cgroup (see mem_cgroup_put).
2597                  */
2598                 mem_cgroup_get(parent);
2599         } else {
2600                 res_counter_init(&mem->res, NULL);
2601                 res_counter_init(&mem->memsw, NULL);
2602         }
2603         mem->last_scanned_child = 0;
2604         spin_lock_init(&mem->reclaim_param_lock);
2605
2606         if (parent)
2607                 mem->swappiness = get_swappiness(parent);
2608         atomic_set(&mem->refcnt, 1);
2609         return &mem->css;
2610 free_out:
2611         __mem_cgroup_free(mem);
2612         root_mem_cgroup = NULL;
2613         return ERR_PTR(error);
2614 }
2615
2616 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2617                                         struct cgroup *cont)
2618 {
2619         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2620
2621         return mem_cgroup_force_empty(mem, false);
2622 }
2623
2624 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2625                                 struct cgroup *cont)
2626 {
2627         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2628
2629         mem_cgroup_put(mem);
2630 }
2631
2632 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2633                                 struct cgroup *cont)
2634 {
2635         int ret;
2636
2637         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2638                                 ARRAY_SIZE(mem_cgroup_files));
2639
2640         if (!ret)
2641                 ret = register_memsw_files(cont, ss);
2642         return ret;
2643 }
2644
2645 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2646                                 struct cgroup *cont,
2647                                 struct cgroup *old_cont,
2648                                 struct task_struct *p,
2649                                 bool threadgroup)
2650 {
2651         mutex_lock(&memcg_tasklist);
2652         /*
2653          * FIXME: It would be better to move the charges of this process
2654          * from the old memcg to the new one. But that is still on the TODO list.
2655          */
2656         mutex_unlock(&memcg_tasklist);
2657 }
2658
2659 struct cgroup_subsys mem_cgroup_subsys = {
2660         .name = "memory",
2661         .subsys_id = mem_cgroup_subsys_id,
2662         .create = mem_cgroup_create,
2663         .pre_destroy = mem_cgroup_pre_destroy,
2664         .destroy = mem_cgroup_destroy,
2665         .populate = mem_cgroup_populate,
2666         .attach = mem_cgroup_move_task,
2667         .early_init = 0,
2668         .use_id = 1,
2669 };
2670
2671 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2672
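/*
 * Boot-time switch: passing "noswapaccount" on the kernel command line
 * clears really_do_swap_account, so enable_swap_cgroup() leaves
 * do_swap_account off even when this config option is enabled.
 */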
2673 static int __init disable_swap_account(char *s)
2674 {
2675         really_do_swap_account = 0;
2676         return 1;
2677 }
2678 __setup("noswapaccount", disable_swap_account);
2679 #endif